Columns: text (string, lengths 2 to 1.1M), id (string, lengths 11 to 117), metadata (dict), __index_level_0__ (int64, 0 to 885)
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cmp provides types and functions related to comparing // ordered values. package cmp // Ordered is a constraint that permits any ordered type: any type // that supports the operators < <= >= >. // If future releases of Go add new ordered types, // this constraint will be modified to include them. // // Note that floating-point types may contain NaN ("not-a-number") values. // An operator such as == or < will always report false when // comparing a NaN value with any other value, NaN or not. // See the [Compare] function for a consistent way to compare NaN values. type Ordered interface { ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | ~string } // Less reports whether x is less than y. // For floating-point types, a NaN is considered less than any non-NaN, // and -0.0 is not less than (is equal to) 0.0. func Less[T Ordered](x, y T) bool { return (isNaN(x) && !isNaN(y)) || x < y } // Compare returns // // -1 if x is less than y, // 0 if x equals y, // +1 if x is greater than y. // // For floating-point types, a NaN is considered less than any non-NaN, // a NaN is considered equal to a NaN, and -0.0 is equal to 0.0. func Compare[T Ordered](x, y T) int { xNaN := isNaN(x) yNaN := isNaN(y) if xNaN { if yNaN { return 0 } return -1 } if yNaN { return +1 } if x < y { return -1 } if x > y { return +1 } return 0 } // isNaN reports whether x is a NaN without requiring the math package. // This will always return false if T is not floating-point. func isNaN[T Ordered](x T) bool { return x != x } // Or returns the first of its arguments that is not equal to the zero value. // If no argument is non-zero, it returns the zero value. func Or[T comparable](vals ...T) T { var zero T for _, val := range vals { if val != zero { return val } } return zero }
go/src/cmp/cmp.go/0
{ "file_path": "go/src/cmp/cmp.go", "repo_id": "go", "token_count": 711 }
200
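The cmp.go row above defines Ordered, Less, Compare, and Or, including the NaN ordering rules spelled out in its doc comments. A minimal usage sketch of that API (the main wrapper is added here only for illustration):

package main

import (
	"cmp"
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()

	// Compare treats NaN as less than any non-NaN and equal to NaN,
	// unlike the < and == operators.
	fmt.Println(cmp.Compare(nan, 1.0)) // -1
	fmt.Println(cmp.Compare(nan, nan)) // 0
	fmt.Println(cmp.Less(nan, 1.0))    // true

	// Or returns the first argument that is not the zero value.
	fmt.Println(cmp.Or("", "fallback", "ignored")) // fallback
}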
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flate import ( "errors" "fmt" "io" "math" ) const ( NoCompression = 0 BestSpeed = 1 BestCompression = 9 DefaultCompression = -1 // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman // entropy encoding. This mode is useful in compressing data that has // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) // that lacks an entropy encoder. Compression gains are achieved when // certain bytes in the input stream occur more frequently than others. // // Note that HuffmanOnly produces a compressed output that is // RFC 1951 compliant. That is, any valid DEFLATE decompressor will // continue to be able to decompress this output. HuffmanOnly = -2 ) const ( logWindowSize = 15 windowSize = 1 << logWindowSize windowMask = windowSize - 1 // The LZ77 step produces a sequence of literal tokens and <length, offset> // pair tokens. The offset is also known as distance. The underlying wire // format limits the range of lengths and offsets. For example, there are // 256 legitimate lengths: those in the range [3, 258]. This package's // compressor uses a higher minimum match length, enabling optimizations // such as finding matches via 32-bit loads and compares. baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 minMatchLength = 4 // The smallest match length that the compressor actually emits maxMatchLength = 258 // The largest match length baseMatchOffset = 1 // The smallest match offset maxMatchOffset = 1 << 15 // The largest match offset // The maximum number of tokens we put into a single flate block, just to // stop things from getting too large. maxFlateBlockTokens = 1 << 14 maxStoreBlockSize = 65535 hashBits = 17 // After 17 performance degrades hashSize = 1 << hashBits hashMask = (1 << hashBits) - 1 maxHashOffset = 1 << 24 skipNever = math.MaxInt32 ) type compressionLevel struct { level, good, lazy, nice, chain, fastSkipHashing int } var levels = []compressionLevel{ {0, 0, 0, 0, 0, 0}, // NoCompression. {1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go. // For levels 2-3 we don't bother trying with lazy matches. {2, 4, 0, 16, 8, 5}, {3, 4, 0, 32, 32, 6}, // Levels 4-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". {4, 4, 4, 16, 16, skipNever}, {5, 8, 16, 32, 32, skipNever}, {6, 8, 16, 128, 128, skipNever}, {7, 8, 32, 128, 256, skipNever}, {8, 32, 128, 258, 1024, skipNever}, {9, 32, 258, 258, 4096, skipNever}, } type compressor struct { compressionLevel w *huffmanBitWriter bulkHasher func([]byte, []uint32) // compression algorithm fill func(*compressor, []byte) int // copy data to window step func(*compressor) // process window bestSpeed *deflateFast // Encoder for BestSpeed // Input hash chains // hashHead[hashValue] contains the largest inputIndex with the specified hash value // If hashHead[hashValue] is within the current window, then // hashPrev[hashHead[hashValue] & windowMask] contains the previous index // with the same hash value. chainHead int hashHead [hashSize]uint32 hashPrev [windowSize]uint32 hashOffset int // input window: unprocessed data is window[index:windowEnd] index int window []byte windowEnd int blockStart int // window index where current tokens start byteAvailable bool // if true, still need to process window[index-1]. 
sync bool // requesting flush // queued output tokens tokens []token // deflate state length int offset int maxInsertIndex int err error // hashMatch must be able to contain hashes for the maximum match length. hashMatch [maxMatchLength - 1]uint32 } func (d *compressor) fillDeflate(b []byte) int { if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) { // shift the window by windowSize copy(d.window, d.window[windowSize:2*windowSize]) d.index -= windowSize d.windowEnd -= windowSize if d.blockStart >= windowSize { d.blockStart -= windowSize } else { d.blockStart = math.MaxInt32 } d.hashOffset += windowSize if d.hashOffset > maxHashOffset { delta := d.hashOffset - 1 d.hashOffset -= delta d.chainHead -= delta // Iterate over slices instead of arrays to avoid copying // the entire table onto the stack (Issue #18625). for i, v := range d.hashPrev[:] { if int(v) > delta { d.hashPrev[i] = uint32(int(v) - delta) } else { d.hashPrev[i] = 0 } } for i, v := range d.hashHead[:] { if int(v) > delta { d.hashHead[i] = uint32(int(v) - delta) } else { d.hashHead[i] = 0 } } } } n := copy(d.window[d.windowEnd:], b) d.windowEnd += n return n } func (d *compressor) writeBlock(tokens []token, index int) error { if index > 0 { var window []byte if d.blockStart <= index { window = d.window[d.blockStart:index] } d.blockStart = index d.w.writeBlock(tokens, false, window) return d.w.err } return nil } // fillWindow will fill the current window with the supplied // dictionary and calculate all hashes. // This is much faster than doing a full encode. // Should only be used after a reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only mode. if d.compressionLevel.level < 2 { return } if d.index != 0 || d.windowEnd != 0 { panic("internal error: fillWindow called with stale data") } // If we are given too much, cut it. if len(b) > windowSize { b = b[len(b)-windowSize:] } // Add all to window. n := copy(d.window, b) // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 for j := 0; j < loops; j++ { index := j * 256 end := index + 256 + minMatchLength - 1 if end > n { end = n } toCheck := d.window[index:end] dstSize := len(toCheck) - minMatchLength + 1 if dstSize <= 0 { continue } dst := d.hashMatch[:dstSize] d.bulkHasher(toCheck, dst) for i, val := range dst { di := i + index hh := &d.hashHead[val&hashMask] // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = *hh // Set the head of the hash chain to us. *hh = uint32(di + d.hashOffset) } } // Update window information. d.windowEnd = n d.index = n } // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead } win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long nice := len(win) - pos if d.nice < nice { nice = d.nice } // If we've got a match that's good enough, only look in 1/4 the chain. 
tries := d.chain length = prevLength if length >= d.good { tries >>= 2 } wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:], wPos, minMatchLook) if n > length && (n > minMatchLength || pos-i <= 4096) { length = n offset = pos - i ok = true if n >= nice { // The match is good enough that we don't try to find a better one. break } wEnd = win[pos+n] } } if i == minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. break } i = int(d.hashPrev[i&windowMask]) - d.hashOffset if i < minIndex || i < 0 { break } } return } func (d *compressor) writeStoredBlock(buf []byte) error { if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { return d.w.err } d.w.writeBytes(buf) return d.w.err } const hashmul = 0x1e35a7bd // hash4 returns a hash representation of the first 4 bytes // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) } // bulkHash4 will compute hashes using the same // algorithm as hash4. func bulkHash4(b []byte, dst []uint32) { if len(b) < minMatchLength { return } hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 dst[0] = (hb * hashmul) >> (32 - hashBits) end := len(b) - minMatchLength + 1 for i := 1; i < end; i++ { hb = (hb << 8) | uint32(b[i+3]) dst[i] = (hb * hashmul) >> (32 - hashBits) } } // matchLen returns the number of matching bytes in a and b // up to length 'max'. Both slices must be at least 'max' // bytes in size. func matchLen(a, b []byte, max int) int { a = a[:max] b = b[:len(a)] for i, av := range a { if b[i] != av { return i } } return max } // encSpeed will compress and store the currently added data, // if enough has been accumulated or we at the end of the stream. // Any error that occurred will be in d.err func (d *compressor) encSpeed() { // We only compress if we have maxStoreBlockSize. if d.windowEnd < maxStoreBlockSize { if !d.sync { return } // Handle small sizes. if d.windowEnd < 128 { switch { case d.windowEnd == 0: return case d.windowEnd <= 16: d.err = d.writeStoredBlock(d.window[:d.windowEnd]) default: d.w.writeBlockHuff(false, d.window[:d.windowEnd]) d.err = d.w.err } d.windowEnd = 0 d.bestSpeed.reset() return } } // Encode the block. d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd]) // If we removed less than 1/16th, Huffman compress the block. if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) { d.w.writeBlockHuff(false, d.window[:d.windowEnd]) } else { d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd]) } d.err = d.w.err d.windowEnd = 0 } func (d *compressor) initDeflate() { d.window = make([]byte, 2*windowSize) d.hashOffset = 1 d.tokens = make([]token, 0, maxFlateBlockTokens+1) d.length = minMatchLength - 1 d.offset = 0 d.byteAvailable = false d.index = 0 d.chainHead = -1 d.bulkHasher = bulkHash4 } func (d *compressor) deflate() { if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { return } d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) Loop: for { if d.index > d.windowEnd { panic("index > windowEnd") } lookahead := d.windowEnd - d.index if lookahead < minMatchLength+maxMatchLength { if !d.sync { break Loop } if d.index > d.windowEnd { panic("index > windowEnd") } if lookahead == 0 { // Flush current output block if any. 
if d.byteAvailable { // There is still one pending token that needs to be flushed d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1]))) d.byteAvailable = false } if len(d.tokens) > 0 { if d.err = d.writeBlock(d.tokens, d.index); d.err != nil { return } d.tokens = d.tokens[:0] } break Loop } } if d.index < d.maxInsertIndex { // Update the hash hash := hash4(d.window[d.index : d.index+minMatchLength]) hh := &d.hashHead[hash&hashMask] d.chainHead = int(*hh) d.hashPrev[d.index&windowMask] = uint32(d.chainHead) *hh = uint32(d.index + d.hashOffset) } prevLength := d.length prevOffset := d.offset d.length = minMatchLength - 1 d.offset = 0 minIndex := d.index - windowSize if minIndex < 0 { minIndex = 0 } if d.chainHead-d.hashOffset >= minIndex && (d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 || d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) { if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { d.length = newLength d.offset = newOffset } } if d.fastSkipHashing != skipNever && d.length >= minMatchLength || d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength { // There was a match at the previous step, and the current match is // not better. Output the previous match. if d.fastSkipHashing != skipNever { d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset))) } else { d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset))) } // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. If there is not enough // lookahead, the last two strings are not inserted into the hash // table. if d.length <= d.fastSkipHashing { var newIndex int if d.fastSkipHashing != skipNever { newIndex = d.index + d.length } else { newIndex = d.index + prevLength - 1 } index := d.index for index++; index < newIndex; index++ { if index < d.maxInsertIndex { hash := hash4(d.window[index : index+minMatchLength]) // Get previous value with the same hash. // Our chain should point to the previous value. hh := &d.hashHead[hash&hashMask] d.hashPrev[index&windowMask] = *hh // Set the head of the hash chain to us. *hh = uint32(index + d.hashOffset) } } d.index = index if d.fastSkipHashing == skipNever { d.byteAvailable = false d.length = minMatchLength - 1 } } else { // For matches this long, we don't bother inserting each individual // item into the table. 
d.index += d.length } if len(d.tokens) == maxFlateBlockTokens { // The block includes the current character if d.err = d.writeBlock(d.tokens, d.index); d.err != nil { return } d.tokens = d.tokens[:0] } } else { if d.fastSkipHashing != skipNever || d.byteAvailable { i := d.index - 1 if d.fastSkipHashing != skipNever { i = d.index } d.tokens = append(d.tokens, literalToken(uint32(d.window[i]))) if len(d.tokens) == maxFlateBlockTokens { if d.err = d.writeBlock(d.tokens, i+1); d.err != nil { return } d.tokens = d.tokens[:0] } } d.index++ if d.fastSkipHashing == skipNever { d.byteAvailable = true } } } } func (d *compressor) fillStore(b []byte) int { n := copy(d.window[d.windowEnd:], b) d.windowEnd += n return n } func (d *compressor) store() { if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { d.err = d.writeStoredBlock(d.window[:d.windowEnd]) d.windowEnd = 0 } } // storeHuff compresses and stores the currently added data // when the d.window is full or we are at the end of the stream. // Any error that occurred will be in d.err func (d *compressor) storeHuff() { if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { return } d.w.writeBlockHuff(false, d.window[:d.windowEnd]) d.err = d.w.err d.windowEnd = 0 } func (d *compressor) write(b []byte) (n int, err error) { if d.err != nil { return 0, d.err } n = len(b) for len(b) > 0 { d.step(d) b = b[d.fill(d, b):] if d.err != nil { return 0, d.err } } return n, nil } func (d *compressor) syncFlush() error { if d.err != nil { return d.err } d.sync = true d.step(d) if d.err == nil { d.w.writeStoredHeader(0, false) d.w.flush() d.err = d.w.err } d.sync = false return d.err } func (d *compressor) init(w io.Writer, level int) (err error) { d.w = newHuffmanBitWriter(w) switch { case level == NoCompression: d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillStore d.step = (*compressor).store case level == HuffmanOnly: d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillStore d.step = (*compressor).storeHuff case level == BestSpeed: d.compressionLevel = levels[level] d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillStore d.step = (*compressor).encSpeed d.bestSpeed = newDeflateFast() d.tokens = make([]token, maxStoreBlockSize) case level == DefaultCompression: level = 6 fallthrough case 2 <= level && level <= 9: d.compressionLevel = levels[level] d.initDeflate() d.fill = (*compressor).fillDeflate d.step = (*compressor).deflate default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } return nil } func (d *compressor) reset(w io.Writer) { d.w.reset(w) d.sync = false d.err = nil switch d.compressionLevel.level { case NoCompression: d.windowEnd = 0 case BestSpeed: d.windowEnd = 0 d.tokens = d.tokens[:0] d.bestSpeed.reset() default: d.chainHead = -1 for i := range d.hashHead { d.hashHead[i] = 0 } for i := range d.hashPrev { d.hashPrev[i] = 0 } d.hashOffset = 1 d.index, d.windowEnd = 0, 0 d.blockStart, d.byteAvailable = 0, false d.tokens = d.tokens[:0] d.length = minMatchLength - 1 d.offset = 0 d.maxInsertIndex = 0 } } func (d *compressor) close() error { if d.err == errWriterClosed { return nil } if d.err != nil { return d.err } d.sync = true d.step(d) if d.err != nil { return d.err } if d.w.writeStoredHeader(0, true); d.w.err != nil { return d.w.err } d.w.flush() if d.w.err != nil { return d.w.err } d.err = errWriterClosed return nil } // NewWriter returns a new [Writer] compressing data at the given level. 
// Following zlib, levels range from 1 ([BestSpeed]) to 9 ([BestCompression]); // higher levels typically run slower but compress more. Level 0 // ([NoCompression]) does not attempt any compression; it only adds the // necessary DEFLATE framing. // Level -1 ([DefaultCompression]) uses the default compression level. // Level -2 ([HuffmanOnly]) will use Huffman compression only, giving // a very fast compression for all types of input, but sacrificing considerable // compression efficiency. // // If level is in the range [-2, 9] then the error returned will be nil. // Otherwise the error returned will be non-nil. func NewWriter(w io.Writer, level int) (*Writer, error) { var dw Writer if err := dw.d.init(w, level); err != nil { return nil, err } return &dw, nil } // NewWriterDict is like [NewWriter] but initializes the new // [Writer] with a preset dictionary. The returned [Writer] behaves // as if the dictionary had been written to it without producing // any compressed output. The compressed data written to w // can only be decompressed by a [Reader] initialized with the // same dictionary. func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { dw := &dictWriter{w} zw, err := NewWriter(dw, level) if err != nil { return nil, err } zw.d.fillWindow(dict) zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. return zw, err } type dictWriter struct { w io.Writer } func (w *dictWriter) Write(b []byte) (n int, err error) { return w.w.Write(b) } var errWriterClosed = errors.New("flate: closed writer") // A Writer takes data written to it and writes the compressed // form of that data to an underlying writer (see [NewWriter]). type Writer struct { d compressor dict []byte } // Write writes data to w, which will eventually write the // compressed form of data to its underlying writer. func (w *Writer) Write(data []byte) (n int, err error) { return w.d.write(data) } // Flush flushes any pending data to the underlying writer. // It is useful mainly in compressed network protocols, to ensure that // a remote reader has enough data to reconstruct a packet. // Flush does not return until the data has been written. // Calling Flush when there is no pending data still causes the [Writer] // to emit a sync marker of at least 4 bytes. // If the underlying writer returns an error, Flush returns that error. // // In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. func (w *Writer) Flush() error { // For more about flushing: // https://www.bolet.org/~pornin/deflate-flush.html return w.d.syncFlush() } // Close flushes and closes the writer. func (w *Writer) Close() error { return w.d.close() } // Reset discards the writer's state and makes it equivalent to // the result of [NewWriter] or [NewWriterDict] called with dst // and w's level and dictionary. func (w *Writer) Reset(dst io.Writer) { if dw, ok := w.d.w.writer.(*dictWriter); ok { // w was created with NewWriterDict dw.w = dst w.d.reset(dw) w.d.fillWindow(w.dict) } else { // w was created with NewWriter w.d.reset(dst) } }
go/src/compress/flate/deflate.go/0
{ "file_path": "go/src/compress/flate/deflate.go", "repo_id": "go", "token_count": 7988 }
201
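The deflate.go row documents NewWriter's level range (-2 through 9) and the Flush/Close behaviour. A small sketch of round-tripping data through the exported compress/flate API shown above (buffer and payload are arbitrary illustrations):

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

func main() {
	var buf bytes.Buffer

	// Levels: 1 (BestSpeed) .. 9 (BestCompression), 0 (NoCompression),
	// -1 (DefaultCompression), -2 (HuffmanOnly), per the docs above.
	zw, err := flate.NewWriter(&buf, flate.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello, hello, hello, deflate")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // flushes and emits the final block
		log.Fatal(err)
	}
	compressed := buf.Len()

	out, err := io.ReadAll(flate.NewReader(&buf))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d compressed bytes -> %q\n", compressed, out)
}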
//Copyright2009ThGoAuthor.Allrightrrvd. //UofthiourccodigovrndbyBSD-tyl //licnthtcnbfoundinthLICENSEfil. pckgmin import"o" funcmin(){ vrb=mk([]byt,65535) f,_:=o.Crt("huffmn-null-mx.in") f.Writ(b) } ABCDEFGHIJKLMNOPQRSTUVXxyz!"#¤%&/?"
go/src/compress/flate/testdata/huffman-text-shift.in/0
{ "file_path": "go/src/compress/flate/testdata/huffman-text-shift.in", "repo_id": "go", "token_count": 130 }
202
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ring import ( "fmt" "testing" ) // For debugging - keep around. func dump(r *Ring) { if r == nil { fmt.Println("empty") return } i, n := 0, r.Len() for p := r; i < n; p = p.next { fmt.Printf("%4d: %p = {<- %p | %p ->}\n", i, p, p.prev, p.next) i++ } fmt.Println() } func verify(t *testing.T, r *Ring, N int, sum int) { // Len n := r.Len() if n != N { t.Errorf("r.Len() == %d; expected %d", n, N) } // iteration n = 0 s := 0 r.Do(func(p any) { n++ if p != nil { s += p.(int) } }) if n != N { t.Errorf("number of forward iterations == %d; expected %d", n, N) } if sum >= 0 && s != sum { t.Errorf("forward ring sum = %d; expected %d", s, sum) } if r == nil { return } // connections if r.next != nil { var p *Ring // previous element for q := r; p == nil || q != r; q = q.next { if p != nil && p != q.prev { t.Errorf("prev = %p, expected q.prev = %p\n", p, q.prev) } p = q } if p != r.prev { t.Errorf("prev = %p, expected r.prev = %p\n", p, r.prev) } } // Next, Prev if r.Next() != r.next { t.Errorf("r.Next() != r.next") } if r.Prev() != r.prev { t.Errorf("r.Prev() != r.prev") } // Move if r.Move(0) != r { t.Errorf("r.Move(0) != r") } if r.Move(N) != r { t.Errorf("r.Move(%d) != r", N) } if r.Move(-N) != r { t.Errorf("r.Move(%d) != r", -N) } for i := 0; i < 10; i++ { ni := N + i mi := ni % N if r.Move(ni) != r.Move(mi) { t.Errorf("r.Move(%d) != r.Move(%d)", ni, mi) } if r.Move(-ni) != r.Move(-mi) { t.Errorf("r.Move(%d) != r.Move(%d)", -ni, -mi) } } } func TestCornerCases(t *testing.T) { var ( r0 *Ring r1 Ring ) // Basics verify(t, r0, 0, 0) verify(t, &r1, 1, 0) // Insert r1.Link(r0) verify(t, r0, 0, 0) verify(t, &r1, 1, 0) // Insert r1.Link(r0) verify(t, r0, 0, 0) verify(t, &r1, 1, 0) // Unlink r1.Unlink(0) verify(t, &r1, 1, 0) } func makeN(n int) *Ring { r := New(n) for i := 1; i <= n; i++ { r.Value = i r = r.Next() } return r } func sumN(n int) int { return (n*n + n) / 2 } func TestNew(t *testing.T) { for i := 0; i < 10; i++ { r := New(i) verify(t, r, i, -1) } for i := 0; i < 10; i++ { r := makeN(i) verify(t, r, i, sumN(i)) } } func TestLink1(t *testing.T) { r1a := makeN(1) var r1b Ring r2a := r1a.Link(&r1b) verify(t, r2a, 2, 1) if r2a != r1a { t.Errorf("a) 2-element link failed") } r2b := r2a.Link(r2a.Next()) verify(t, r2b, 2, 1) if r2b != r2a.Next() { t.Errorf("b) 2-element link failed") } r1c := r2b.Link(r2b) verify(t, r1c, 1, 1) verify(t, r2b, 1, 0) } func TestLink2(t *testing.T) { var r0 *Ring r1a := &Ring{Value: 42} r1b := &Ring{Value: 77} r10 := makeN(10) r1a.Link(r0) verify(t, r1a, 1, 42) r1a.Link(r1b) verify(t, r1a, 2, 42+77) r10.Link(r0) verify(t, r10, 10, sumN(10)) r10.Link(r1a) verify(t, r10, 12, sumN(10)+42+77) } func TestLink3(t *testing.T) { var r Ring n := 1 for i := 1; i < 10; i++ { n += i verify(t, r.Link(New(i)), n, -1) } } func TestUnlink(t *testing.T) { r10 := makeN(10) s10 := r10.Move(6) sum10 := sumN(10) verify(t, r10, 10, sum10) verify(t, s10, 10, sum10) r0 := r10.Unlink(0) verify(t, r0, 0, 0) r1 := r10.Unlink(1) verify(t, r1, 1, 2) verify(t, r10, 9, sum10-2) r9 := r10.Unlink(9) verify(t, r9, 9, sum10-2) verify(t, r10, 9, sum10-2) } func TestLinkUnlink(t *testing.T) { for i := 1; i < 4; i++ { ri := New(i) for j := 0; j < i; j++ { rj := ri.Unlink(j) verify(t, rj, j, -1) verify(t, ri, i-j, -1) ri.Link(rj) verify(t, ri, i, -1) } } } // Test that calling Move() on an empty Ring initializes it. 
func TestMoveEmptyRing(t *testing.T) { var r Ring r.Move(1) verify(t, &r, 1, 0) }
go/src/container/ring/ring_test.go/0
{ "file_path": "go/src/container/ring/ring_test.go", "repo_id": "go", "token_count": 2083 }
203
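The ring_test.go row exercises New, Link, Unlink, Move, and Do from container/ring. A brief usage sketch of that public API (values and sizes are arbitrary):

package main

import (
	"container/ring"
	"fmt"
)

func main() {
	// Build a 4-element ring holding 1..4, as makeN does in the test above.
	r := ring.New(4)
	for i := 1; i <= 4; i++ {
		r.Value = i
		r = r.Next()
	}

	// Do visits every element once, going forward.
	sum := 0
	r.Do(func(v any) { sum += v.(int) })
	fmt.Println("sum:", sum) // 10

	// Unlink removes the two elements after r and returns them as a ring.
	removed := r.Unlink(2)
	fmt.Println("removed:", removed.Len(), "remaining:", r.Len()) // 2 and 2
}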
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !purego package aes import ( "crypto/cipher" "crypto/internal/alias" ) // Assert that aesCipherAsm implements the cbcEncAble and cbcDecAble interfaces. var _ cbcEncAble = (*aesCipherAsm)(nil) var _ cbcDecAble = (*aesCipherAsm)(nil) type cbc struct { b *aesCipherAsm c code iv [BlockSize]byte } func (b *aesCipherAsm) NewCBCEncrypter(iv []byte) cipher.BlockMode { var c cbc c.b = b c.c = b.function copy(c.iv[:], iv) return &c } func (b *aesCipherAsm) NewCBCDecrypter(iv []byte) cipher.BlockMode { var c cbc c.b = b c.c = b.function + 128 // decrypt function code is encrypt + 128 copy(c.iv[:], iv) return &c } func (x *cbc) BlockSize() int { return BlockSize } // cryptBlocksChain invokes the cipher message with chaining (KMC) instruction // with the given function code. The length must be a multiple of BlockSize (16). // //go:noescape func cryptBlocksChain(c code, iv, key, dst, src *byte, length int) func (x *cbc) CryptBlocks(dst, src []byte) { if len(src)%BlockSize != 0 { panic("crypto/cipher: input not full blocks") } if len(dst) < len(src) { panic("crypto/cipher: output smaller than input") } if alias.InexactOverlap(dst[:len(src)], src) { panic("crypto/cipher: invalid buffer overlap") } if len(src) > 0 { cryptBlocksChain(x.c, &x.iv[0], &x.b.key[0], &dst[0], &src[0], len(src)) } } func (x *cbc) SetIV(iv []byte) { if len(iv) != BlockSize { panic("cipher: incorrect length IV") } copy(x.iv[:], iv) }
go/src/crypto/aes/cbc_s390x.go/0
{ "file_path": "go/src/crypto/aes/cbc_s390x.go", "repo_id": "go", "token_count": 653 }
204
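cbc_s390x.go wires the KMC instruction into the cbcEncAble/cbcDecAble fast path that crypto/cipher looks for on the AES block. A hedged sketch of the portable, exported way to reach that code path (the key and IV here are zero-filled placeholders, not secure values):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
	"log"
)

func main() {
	key := make([]byte, 32)           // AES-256 key (all zero, for illustration only)
	iv := make([]byte, aes.BlockSize) // 16-byte IV

	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	// On s390x this BlockMode is backed by the assembly above; elsewhere the
	// generic CBC code runs. CBC input must be a whole number of blocks.
	plaintext := make([]byte, 2*aes.BlockSize)
	copy(plaintext, []byte("sixteen byte blk"))

	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)

	decrypted := make([]byte, len(ciphertext))
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(decrypted, ciphertext)

	fmt.Println("round trip ok:", string(decrypted[:16]) == "sixteen byte blk")
}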
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build (goexperiment.boringcrypto && !boringcrypto) || (!goexperiment.boringcrypto && boringcrypto) package boring_test import "testing" func TestNotBoring(t *testing.T) { t.Error("goexperiment.boringcrypto and boringcrypto should be equivalent build tags") }
go/src/crypto/boring/notboring_test.go/0
{ "file_path": "go/src/crypto/boring/notboring_test.go", "repo_id": "go", "token_count": 130 }
205
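notboring_test.go relies on a build constraint that is satisfied only when exactly one of two tags is set, which is how it fails loudly if the tags drift apart. The same exclusive-or idiom, sketched with hypothetical tag names:

//go:build (featurex && !featurey) || (!featurex && featurey)

// Package example compiles only when exactly one of the two hypothetical
// tags is set, mirroring how notboring_test.go keeps
// goexperiment.boringcrypto and boringcrypto in sync.
package example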
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ecdh import ( "crypto/internal/edwards25519/field" "crypto/internal/randutil" "errors" "io" ) var ( x25519PublicKeySize = 32 x25519PrivateKeySize = 32 x25519SharedSecretSize = 32 ) // X25519 returns a [Curve] which implements the X25519 function over Curve25519 // (RFC 7748, Section 5). // // Multiple invocations of this function will return the same value, so it can // be used for equality checks and switch statements. func X25519() Curve { return x25519 } var x25519 = &x25519Curve{} type x25519Curve struct{} func (c *x25519Curve) String() string { return "X25519" } func (c *x25519Curve) GenerateKey(rand io.Reader) (*PrivateKey, error) { key := make([]byte, x25519PrivateKeySize) randutil.MaybeReadByte(rand) if _, err := io.ReadFull(rand, key); err != nil { return nil, err } return c.NewPrivateKey(key) } func (c *x25519Curve) NewPrivateKey(key []byte) (*PrivateKey, error) { if len(key) != x25519PrivateKeySize { return nil, errors.New("crypto/ecdh: invalid private key size") } return &PrivateKey{ curve: c, privateKey: append([]byte{}, key...), }, nil } func (c *x25519Curve) privateKeyToPublicKey(key *PrivateKey) *PublicKey { if key.curve != c { panic("crypto/ecdh: internal error: converting the wrong key type") } k := &PublicKey{ curve: key.curve, publicKey: make([]byte, x25519PublicKeySize), } x25519Basepoint := [32]byte{9} x25519ScalarMult(k.publicKey, key.privateKey, x25519Basepoint[:]) return k } func (c *x25519Curve) NewPublicKey(key []byte) (*PublicKey, error) { if len(key) != x25519PublicKeySize { return nil, errors.New("crypto/ecdh: invalid public key") } return &PublicKey{ curve: c, publicKey: append([]byte{}, key...), }, nil } func (c *x25519Curve) ecdh(local *PrivateKey, remote *PublicKey) ([]byte, error) { out := make([]byte, x25519SharedSecretSize) x25519ScalarMult(out, local.privateKey, remote.publicKey) if isZero(out) { return nil, errors.New("crypto/ecdh: bad X25519 remote ECDH input: low order point") } return out, nil } func x25519ScalarMult(dst, scalar, point []byte) { var e [32]byte copy(e[:], scalar[:]) e[0] &= 248 e[31] &= 127 e[31] |= 64 var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element x1.SetBytes(point[:]) x2.One() x3.Set(&x1) z3.One() swap := 0 for pos := 254; pos >= 0; pos-- { b := e[pos/8] >> uint(pos&7) b &= 1 swap ^= int(b) x2.Swap(&x3, swap) z2.Swap(&z3, swap) swap = int(b) tmp0.Subtract(&x3, &z3) tmp1.Subtract(&x2, &z2) x2.Add(&x2, &z2) z2.Add(&x3, &z3) z3.Multiply(&tmp0, &x2) z2.Multiply(&z2, &tmp1) tmp0.Square(&tmp1) tmp1.Square(&x2) x3.Add(&z3, &z2) z2.Subtract(&z3, &z2) x2.Multiply(&tmp1, &tmp0) tmp1.Subtract(&tmp1, &tmp0) z2.Square(&z2) z3.Mult32(&tmp1, 121666) x3.Square(&x3) tmp0.Add(&tmp0, &z3) z3.Multiply(&x1, &z2) z2.Multiply(&tmp1, &tmp0) } x2.Swap(&x3, swap) z2.Swap(&z3, swap) z2.Invert(&z2) x2.Multiply(&x2, &z2) copy(dst[:], x2.Bytes()) }
go/src/crypto/ecdh/x25519.go/0
{ "file_path": "go/src/crypto/ecdh/x25519.go", "repo_id": "go", "token_count": 1414 }
206
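x25519.go above implements the X25519 Curve returned by ecdh.X25519(). A short sketch of the exported crypto/ecdh flow built on it (two in-process parties, for illustration only):

package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
	"log"
)

func main() {
	curve := ecdh.X25519()

	alice, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	bob, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Each side combines its private key with the peer's public key.
	s1, err := alice.ECDH(bob.PublicKey())
	if err != nil {
		log.Fatal(err)
	}
	s2, err := bob.ECDH(alice.PublicKey())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("shared secrets match:", bytes.Equal(s1, s2)) // true, 32 bytes each
}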
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !purego #include "textflag.h" // func addMulVVW1024(z, x *uint, y uint) (c uint) TEXT ·addMulVVW1024(SB), $0-16 MOVL $32, BX JMP addMulVVWx(SB) // func addMulVVW1536(z, x *uint, y uint) (c uint) TEXT ·addMulVVW1536(SB), $0-16 MOVL $48, BX JMP addMulVVWx(SB) // func addMulVVW2048(z, x *uint, y uint) (c uint) TEXT ·addMulVVW2048(SB), $0-16 MOVL $64, BX JMP addMulVVWx(SB) TEXT addMulVVWx(SB), NOFRAME|NOSPLIT, $0 MOVL z+0(FP), DI MOVL x+4(FP), SI MOVL y+8(FP), BP LEAL (DI)(BX*4), DI LEAL (SI)(BX*4), SI NEGL BX // i = -n MOVL $0, CX // c = 0 JMP E6 L6: MOVL (SI)(BX*4), AX MULL BP ADDL CX, AX ADCL $0, DX ADDL AX, (DI)(BX*4) ADCL $0, DX MOVL DX, CX ADDL $1, BX // i++ E6: CMPL BX, $0 // i < 0 JL L6 MOVL CX, c+12(FP) RET
go/src/crypto/internal/bigmod/nat_386.s/0
{ "file_path": "go/src/crypto/internal/bigmod/nat_386.s", "repo_id": "go", "token_count": 509 }
207
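nat_386.s implements addMulVVW for fixed limb counts: z += x*y across the limbs, returning the outgoing carry. A portable Go sketch of the same loop using math/bits (the slice-based signature and the demo values are illustrative; the assembly above works on 32-bit words via MULL/ADDL/ADCL):

package main

import (
	"fmt"
	"math/bits"
)

// addMulVVW computes z += x*y limb by limb and returns the final carry,
// mirroring what the 386 assembly loop does.
func addMulVVW(z, x []uint, y uint) (carry uint) {
	for i := range z {
		hi, lo := bits.Mul(x[i], y)     // double-width product
		lo, c := bits.Add(lo, carry, 0) // add carry from the previous limb
		hi += c
		z[i], c = bits.Add(z[i], lo, 0) // accumulate into z
		carry = hi + c
	}
	return carry
}

func main() {
	z := []uint{1, 2, 3}
	x := []uint{^uint(0), ^uint(0), ^uint(0)} // all-ones limbs to force carries
	c := addMulVVW(z, x, 7)
	fmt.Println(z, c)
}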
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bcache import ( "fmt" "runtime" "sync" "sync/atomic" "testing" ) var registeredCache Cache[int, int32] func init() { registeredCache.Register() } var seq atomic.Uint32 func next[T int | int32]() *T { x := new(T) *x = T(seq.Add(1)) return x } func str[T int | int32](x *T) string { if x == nil { return "nil" } return fmt.Sprint(*x) } func TestCache(t *testing.T) { // Use unregistered cache for functionality tests, // to keep the runtime from clearing behind our backs. c := new(Cache[int, int32]) // Create many entries. m := make(map[*int]*int32) for i := 0; i < 10000; i++ { k := next[int]() v := next[int32]() m[k] = v c.Put(k, v) } // Overwrite a random 20% of those. n := 0 for k := range m { v := next[int32]() m[k] = v c.Put(k, v) if n++; n >= 2000 { break } } // Check results. for k, v := range m { if cv := c.Get(k); cv != v { t.Fatalf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v)) } } c.Clear() for k := range m { if cv := c.Get(k); cv != nil { t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv)) } } // Check that registered cache is cleared at GC. c = &registeredCache for k, v := range m { c.Put(k, v) } runtime.GC() for k := range m { if cv := c.Get(k); cv != nil { t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv)) } } // Check that cache works for concurrent access. // Lists are discarded if they reach 1000 entries, // and there are cacheSize list heads, so we should be // able to do 100 * cacheSize entries with no problem at all. c = new(Cache[int, int32]) var barrier, wg sync.WaitGroup const N = 100 barrier.Add(N) wg.Add(N) var lost int32 for i := 0; i < N; i++ { go func() { defer wg.Done() m := make(map[*int]*int32) for j := 0; j < cacheSize; j++ { k, v := next[int](), next[int32]() m[k] = v c.Put(k, v) } barrier.Done() barrier.Wait() for k, v := range m { if cv := c.Get(k); cv != v { t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v)) atomic.AddInt32(&lost, +1) } } }() } wg.Wait() if lost != 0 { t.Errorf("lost %d entries", lost) } }
go/src/crypto/internal/boring/bcache/cache_test.go/0
{ "file_path": "go/src/crypto/internal/boring/bcache/cache_test.go", "repo_id": "go", "token_count": 1059 }
208
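cache_test.go above covers the whole surface of the internal bcache.Cache: Register ties it to the GC, Put/Get key by pointer identity, and Clear empties it. Since that type is internal, the runnable sketch below uses a deliberately simplified, mutex-guarded stand-in just to show the same Put/Get/Clear contract; it is not the lock-free, GC-cleared implementation being tested:

package main

import (
	"fmt"
	"sync"
)

// miniCache is an illustrative stand-in for bcache.Cache: keys and values
// are pointers, and lookup is by pointer identity.
type miniCache[K, V any] struct {
	mu sync.Mutex
	m  map[*K]*V
}

func (c *miniCache[K, V]) Put(k *K, v *V) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[*K]*V)
	}
	c.m[k] = v
}

func (c *miniCache[K, V]) Get(k *K) *V {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.m[k]
}

func (c *miniCache[K, V]) Clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m = nil
}

func main() {
	c := new(miniCache[int, int32])
	k, v := new(int), new(int32)
	*k, *v = 1, 42

	c.Put(k, v)
	fmt.Println(c.Get(k) == v)   // true: lookup is by pointer identity
	c.Clear()
	fmt.Println(c.Get(k) == nil) // true: entries are gone after Clear
}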
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan package boring // #include "goboringcrypto.h" import "C" import "unsafe" type randReader int func (randReader) Read(b []byte) (int, error) { // Note: RAND_bytes should never fail; the return value exists only for historical reasons. // We check it even so. if len(b) > 0 && C._goboringcrypto_RAND_bytes((*C.uint8_t)(unsafe.Pointer(&b[0])), C.size_t(len(b))) == 0 { return 0, fail("RAND_bytes") } return len(b), nil } const RandReader = randReader(0)
go/src/crypto/internal/boring/rand.go/0
{ "file_path": "go/src/crypto/internal/boring/rand.go", "repo_id": "go", "token_count": 240 }
209
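rand.go above exposes the BoringCrypto RNG as a tiny io.Reader (RandReader) over RAND_bytes. A sketch of how any such reader is consumed in practice, using the portable crypto/rand.Reader here since RandReader lives in an internal package:

package main

import (
	"crypto/rand"
	"fmt"
	"io"
	"log"
)

func main() {
	// The pattern is the same for boring.RandReader: hand an io.Reader of
	// random bytes to io.ReadFull and check the error.
	buf := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", buf)
}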
// Copyright (c) 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package field implements fast arithmetic modulo 2^255-19. package field import ( "crypto/subtle" "errors" "internal/byteorder" "math/bits" ) // Element represents an element of the field GF(2^255-19). Note that this // is not a cryptographically secure group, and should only be used to interact // with edwards25519.Point coordinates. // // This type works similarly to math/big.Int, and all arguments and receivers // are allowed to alias. // // The zero value is a valid zero element. type Element struct { // An element t represents the integer // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 // // Between operations, all limbs are expected to be lower than 2^52. l0 uint64 l1 uint64 l2 uint64 l3 uint64 l4 uint64 } const maskLow51Bits uint64 = (1 << 51) - 1 var feZero = &Element{0, 0, 0, 0, 0} // Zero sets v = 0, and returns v. func (v *Element) Zero() *Element { *v = *feZero return v } var feOne = &Element{1, 0, 0, 0, 0} // One sets v = 1, and returns v. func (v *Element) One() *Element { *v = *feOne return v } // reduce reduces v modulo 2^255 - 19 and returns it. func (v *Element) reduce() *Element { v.carryPropagate() // After the light reduction we now have a field element representation // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. c := (v.l0 + 19) >> 51 c = (v.l1 + c) >> 51 c = (v.l2 + c) >> 51 c = (v.l3 + c) >> 51 c = (v.l4 + c) >> 51 // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's // effectively applying the reduction identity to the carry. v.l0 += 19 * c v.l1 += v.l0 >> 51 v.l0 = v.l0 & maskLow51Bits v.l2 += v.l1 >> 51 v.l1 = v.l1 & maskLow51Bits v.l3 += v.l2 >> 51 v.l2 = v.l2 & maskLow51Bits v.l4 += v.l3 >> 51 v.l3 = v.l3 & maskLow51Bits // no additional carry v.l4 = v.l4 & maskLow51Bits return v } // Add sets v = a + b, and returns v. func (v *Element) Add(a, b *Element) *Element { v.l0 = a.l0 + b.l0 v.l1 = a.l1 + b.l1 v.l2 = a.l2 + b.l2 v.l3 = a.l3 + b.l3 v.l4 = a.l4 + b.l4 // Using the generic implementation here is actually faster than the // assembly. Probably because the body of this function is so simple that // the compiler can figure out better optimizations by inlining the carry // propagation. return v.carryPropagateGeneric() } // Subtract sets v = a - b, and returns v. func (v *Element) Subtract(a, b *Element) *Element { // We first add 2 * p, to guarantee the subtraction won't underflow, and // then subtract b (which can be up to 2^255 + 2^13 * 19). v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 return v.carryPropagate() } // Negate sets v = -a, and returns v. func (v *Element) Negate(a *Element) *Element { return v.Subtract(feZero, a) } // Invert sets v = 1/z mod p, and returns v. // // If z == 0, Invert returns v = 0. func (v *Element) Invert(z *Element) *Element { // Inversion is implemented as exponentiation with exponent p − 2. It uses the // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element z2.Square(z) // 2 t.Square(&z2) // 4 t.Square(&t) // 8 z9.Multiply(&t, z) // 9 z11.Multiply(&z9, &z2) // 11 t.Square(&z11) // 22 z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 t.Square(&z2_5_0) // 2^6 - 2^1 for i := 0; i < 4; i++ { t.Square(&t) // 2^10 - 2^5 } z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 t.Square(&z2_10_0) // 2^11 - 2^1 for i := 0; i < 9; i++ { t.Square(&t) // 2^20 - 2^10 } z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 t.Square(&z2_20_0) // 2^21 - 2^1 for i := 0; i < 19; i++ { t.Square(&t) // 2^40 - 2^20 } t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 t.Square(&t) // 2^41 - 2^1 for i := 0; i < 9; i++ { t.Square(&t) // 2^50 - 2^10 } z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 t.Square(&z2_50_0) // 2^51 - 2^1 for i := 0; i < 49; i++ { t.Square(&t) // 2^100 - 2^50 } z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 t.Square(&z2_100_0) // 2^101 - 2^1 for i := 0; i < 99; i++ { t.Square(&t) // 2^200 - 2^100 } t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 t.Square(&t) // 2^201 - 2^1 for i := 0; i < 49; i++ { t.Square(&t) // 2^250 - 2^50 } t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 t.Square(&t) // 2^251 - 2^1 t.Square(&t) // 2^252 - 2^2 t.Square(&t) // 2^253 - 2^3 t.Square(&t) // 2^254 - 2^4 t.Square(&t) // 2^255 - 2^5 return v.Multiply(&t, &z11) // 2^255 - 21 } // Set sets v = a, and returns v. func (v *Element) Set(a *Element) *Element { *v = *a return v } // SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is // not of the right length, SetBytes returns nil and an error, and the // receiver is unchanged. // // Consistent with RFC 7748, the most significant bit (the high bit of the // last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) // are accepted. Note that this is laxer than specified by RFC 8032, but // consistent with most Ed25519 implementations. func (v *Element) SetBytes(x []byte) (*Element, error) { if len(x) != 32 { return nil, errors.New("edwards25519: invalid field element input size") } // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). v.l0 = byteorder.LeUint64(x[0:8]) v.l0 &= maskLow51Bits // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). v.l1 = byteorder.LeUint64(x[6:14]) >> 3 v.l1 &= maskLow51Bits // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). v.l2 = byteorder.LeUint64(x[12:20]) >> 6 v.l2 &= maskLow51Bits // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). v.l3 = byteorder.LeUint64(x[19:27]) >> 1 v.l3 &= maskLow51Bits // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51). // Note: not bytes 25:33, shift 4, to avoid overread. v.l4 = byteorder.LeUint64(x[24:32]) >> 12 v.l4 &= maskLow51Bits return v, nil } // Bytes returns the canonical 32-byte little-endian encoding of v. func (v *Element) Bytes() []byte { // This function is outlined to make the allocations inline in the caller // rather than happen on the heap. var out [32]byte return v.bytes(&out) } func (v *Element) bytes(out *[32]byte) []byte { t := *v t.reduce() var buf [8]byte for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { bitsOffset := i * 51 byteorder.LePutUint64(buf[:], l<<uint(bitsOffset%8)) for i, bb := range buf { off := bitsOffset/8 + i if off >= len(out) { break } out[off] |= bb } } return out[:] } // Equal returns 1 if v and u are equal, and 0 otherwise. func (v *Element) Equal(u *Element) int { sa, sv := u.Bytes(), v.Bytes() return subtle.ConstantTimeCompare(sa, sv) } // mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. 
func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } // Select sets v to a if cond == 1, and to b if cond == 0. func (v *Element) Select(a, b *Element, cond int) *Element { m := mask64Bits(cond) v.l0 = (m & a.l0) | (^m & b.l0) v.l1 = (m & a.l1) | (^m & b.l1) v.l2 = (m & a.l2) | (^m & b.l2) v.l3 = (m & a.l3) | (^m & b.l3) v.l4 = (m & a.l4) | (^m & b.l4) return v } // Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. func (v *Element) Swap(u *Element, cond int) { m := mask64Bits(cond) t := m & (v.l0 ^ u.l0) v.l0 ^= t u.l0 ^= t t = m & (v.l1 ^ u.l1) v.l1 ^= t u.l1 ^= t t = m & (v.l2 ^ u.l2) v.l2 ^= t u.l2 ^= t t = m & (v.l3 ^ u.l3) v.l3 ^= t u.l3 ^= t t = m & (v.l4 ^ u.l4) v.l4 ^= t u.l4 ^= t } // IsNegative returns 1 if v is negative, and 0 otherwise. func (v *Element) IsNegative() int { return int(v.Bytes()[0] & 1) } // Absolute sets v to |u|, and returns v. func (v *Element) Absolute(u *Element) *Element { return v.Select(new(Element).Negate(u), u, u.IsNegative()) } // Multiply sets v = x * y, and returns v. func (v *Element) Multiply(x, y *Element) *Element { feMul(v, x, y) return v } // Square sets v = x * x, and returns v. func (v *Element) Square(x *Element) *Element { feSquare(v, x) return v } // Mult32 sets v = x * y, and returns v. func (v *Element) Mult32(x *Element, y uint32) *Element { x0lo, x0hi := mul51(x.l0, y) x1lo, x1hi := mul51(x.l1, y) x2lo, x2hi := mul51(x.l2, y) x3lo, x3hi := mul51(x.l3, y) x4lo, x4hi := mul51(x.l4, y) v.l0 = x0lo + 19*x4hi // carried over per the reduction identity v.l1 = x1lo + x0hi v.l2 = x2lo + x1hi v.l3 = x3lo + x2hi v.l4 = x4lo + x3hi // The hi portions are going to be only 32 bits, plus any previous excess, // so we can skip the carry propagation. return v } // mul51 returns lo + hi * 2⁵¹ = a * b. func mul51(a uint64, b uint32) (lo uint64, hi uint64) { mh, ml := bits.Mul64(a, uint64(b)) lo = ml & maskLow51Bits hi = (mh << 13) | (ml >> 51) return } // Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. func (v *Element) Pow22523(x *Element) *Element { var t0, t1, t2 Element t0.Square(x) // x^2 t1.Square(&t0) // x^4 t1.Square(&t1) // x^8 t1.Multiply(x, &t1) // x^9 t0.Multiply(&t0, &t1) // x^11 t0.Square(&t0) // x^22 t0.Multiply(&t1, &t0) // x^31 t1.Square(&t0) // x^62 for i := 1; i < 5; i++ { // x^992 t1.Square(&t1) } t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 t1.Square(&t0) // 2^11 - 2 for i := 1; i < 10; i++ { // 2^20 - 2^10 t1.Square(&t1) } t1.Multiply(&t1, &t0) // 2^20 - 1 t2.Square(&t1) // 2^21 - 2 for i := 1; i < 20; i++ { // 2^40 - 2^20 t2.Square(&t2) } t1.Multiply(&t2, &t1) // 2^40 - 1 t1.Square(&t1) // 2^41 - 2 for i := 1; i < 10; i++ { // 2^50 - 2^10 t1.Square(&t1) } t0.Multiply(&t1, &t0) // 2^50 - 1 t1.Square(&t0) // 2^51 - 2 for i := 1; i < 50; i++ { // 2^100 - 2^50 t1.Square(&t1) } t1.Multiply(&t1, &t0) // 2^100 - 1 t2.Square(&t1) // 2^101 - 2 for i := 1; i < 100; i++ { // 2^200 - 2^100 t2.Square(&t2) } t1.Multiply(&t2, &t1) // 2^200 - 1 t1.Square(&t1) // 2^201 - 2 for i := 1; i < 50; i++ { // 2^250 - 2^50 t1.Square(&t1) } t0.Multiply(&t1, &t0) // 2^250 - 1 t0.Square(&t0) // 2^251 - 2 t0.Square(&t0) // 2^252 - 4 return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) } // sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. var sqrtM1 = &Element{1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133} // SqrtRatio sets r to the non-negative square root of the ratio of u and v. 
// // If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio // sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, // and returns r and 0. func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) { t0 := new(Element) // r = (u * v3) * (u * v7)^((p-5)/8) v2 := new(Element).Square(v) uv3 := new(Element).Multiply(u, t0.Multiply(v2, v)) uv7 := new(Element).Multiply(uv3, t0.Square(v2)) rr := new(Element).Multiply(uv3, t0.Pow22523(uv7)) check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2 uNeg := new(Element).Negate(u) correctSignSqrt := check.Equal(u) flippedSignSqrt := check.Equal(uNeg) flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1)) rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI) r.Absolute(rr) // Choose the nonnegative square root. return r, correctSignSqrt | flippedSignSqrt }
go/src/crypto/internal/edwards25519/field/fe.go/0
{ "file_path": "go/src/crypto/internal/edwards25519/field/fe.go", "repo_id": "go", "token_count": 5650 }
210
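fe.go stores a field element as five 51-bit limbs, and its SetBytes comments spell out which byte window and shift produce each limb. A standalone sketch of that decoding, using encoding/binary in place of the internal byteorder helper (the input bytes are arbitrary):

package main

import (
	"encoding/binary"
	"fmt"
)

const maskLow51Bits = (1 << 51) - 1

// limbsFromBytes mirrors Element.SetBytes above: each limb is a 64-bit
// little-endian load at a staggered byte offset, shifted and masked to 51 bits.
func limbsFromBytes(x [32]byte) [5]uint64 {
	var l [5]uint64
	l[0] = binary.LittleEndian.Uint64(x[0:8]) & maskLow51Bits         // bits 0:51
	l[1] = binary.LittleEndian.Uint64(x[6:14]) >> 3 & maskLow51Bits   // bits 51:102
	l[2] = binary.LittleEndian.Uint64(x[12:20]) >> 6 & maskLow51Bits  // bits 102:153
	l[3] = binary.LittleEndian.Uint64(x[19:27]) >> 1 & maskLow51Bits  // bits 153:204
	l[4] = binary.LittleEndian.Uint64(x[24:32]) >> 12 & maskLow51Bits // bits 204:255
	return l
}

func main() {
	var x [32]byte
	x[0] = 9 // the Curve25519 basepoint u-coordinate, as used in x25519.go above
	fmt.Println(limbsFromBytes(x)) // [9 0 0 0 0]
}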
// Copyright (c) 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package edwards25519 import ( "testing" "testing/quick" ) var ( // a random scalar generated using dalek. dalekScalar, _ = (&Scalar{}).SetCanonicalBytes([]byte{219, 106, 114, 9, 174, 249, 155, 89, 69, 203, 201, 93, 92, 116, 234, 187, 78, 115, 103, 172, 182, 98, 62, 103, 187, 136, 13, 100, 248, 110, 12, 4}) // the above, times the edwards25519 basepoint. dalekScalarBasepoint, _ = new(Point).SetBytes([]byte{0xf4, 0xef, 0x7c, 0xa, 0x34, 0x55, 0x7b, 0x9f, 0x72, 0x3b, 0xb6, 0x1e, 0xf9, 0x46, 0x9, 0x91, 0x1c, 0xb9, 0xc0, 0x6c, 0x17, 0x28, 0x2d, 0x8b, 0x43, 0x2b, 0x5, 0x18, 0x6a, 0x54, 0x3e, 0x48}) ) func TestScalarMultSmallScalars(t *testing.T) { var z Scalar var p Point p.ScalarMult(&z, B) if I.Equal(&p) != 1 { t.Error("0*B != 0") } checkOnCurve(t, &p) scEight, _ := (&Scalar{}).SetCanonicalBytes([]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) p.ScalarMult(scEight, B) if B.Equal(&p) != 1 { t.Error("1*B != 1") } checkOnCurve(t, &p) } func TestScalarMultVsDalek(t *testing.T) { var p Point p.ScalarMult(dalekScalar, B) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("Scalar mul does not match dalek") } checkOnCurve(t, &p) } func TestBaseMultVsDalek(t *testing.T) { var p Point p.ScalarBaseMult(dalekScalar) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("Scalar mul does not match dalek") } checkOnCurve(t, &p) } func TestVarTimeDoubleBaseMultVsDalek(t *testing.T) { var p Point var z Scalar p.VarTimeDoubleScalarBaseMult(dalekScalar, B, &z) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("VarTimeDoubleScalarBaseMult fails with b=0") } checkOnCurve(t, &p) p.VarTimeDoubleScalarBaseMult(&z, B, dalekScalar) if dalekScalarBasepoint.Equal(&p) != 1 { t.Error("VarTimeDoubleScalarBaseMult fails with a=0") } checkOnCurve(t, &p) } func TestScalarMultDistributesOverAdd(t *testing.T) { scalarMultDistributesOverAdd := func(x, y Scalar) bool { var z Scalar z.Add(&x, &y) var p, q, r, check Point p.ScalarMult(&x, B) q.ScalarMult(&y, B) r.ScalarMult(&z, B) check.Add(&p, &q) checkOnCurve(t, &p, &q, &r, &check) return check.Equal(&r) == 1 } if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig(32)); err != nil { t.Error(err) } } func TestScalarMultNonIdentityPoint(t *testing.T) { // Check whether p.ScalarMult and q.ScalaBaseMult give the same, // when p and q are originally set to the base point. scalarMultNonIdentityPoint := func(x Scalar) bool { var p, q Point p.Set(B) q.Set(B) p.ScalarMult(&x, B) q.ScalarBaseMult(&x) checkOnCurve(t, &p, &q) return p.Equal(&q) == 1 } if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig(32)); err != nil { t.Error(err) } } func TestBasepointTableGeneration(t *testing.T) { // The basepoint table is 32 affineLookupTables, // corresponding to (16^2i)*B for table i. 
basepointTable := basepointTable() tmp1 := &projP1xP1{} tmp2 := &projP2{} tmp3 := &Point{} tmp3.Set(B) table := make([]affineLookupTable, 32) for i := 0; i < 32; i++ { // Build the table table[i].FromP3(tmp3) // Assert equality with the hardcoded one if table[i] != basepointTable[i] { t.Errorf("Basepoint table %d does not match", i) } // Set p = (16^2)*p = 256*p = 2^8*p tmp2.FromP3(tmp3) for j := 0; j < 7; j++ { tmp1.Double(tmp2) tmp2.FromP1xP1(tmp1) } tmp1.Double(tmp2) tmp3.fromP1xP1(tmp1) checkOnCurve(t, tmp3) } } func TestScalarMultMatchesBaseMult(t *testing.T) { scalarMultMatchesBaseMult := func(x Scalar) bool { var p, q Point p.ScalarMult(&x, B) q.ScalarBaseMult(&x) checkOnCurve(t, &p, &q) return p.Equal(&q) == 1 } if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig(32)); err != nil { t.Error(err) } } func TestBasepointNafTableGeneration(t *testing.T) { var table nafLookupTable8 table.FromP3(B) if table != *basepointNafTable() { t.Error("BasepointNafTable does not match") } } func TestVarTimeDoubleBaseMultMatchesBaseMult(t *testing.T) { varTimeDoubleBaseMultMatchesBaseMult := func(x, y Scalar) bool { var p, q1, q2, check Point p.VarTimeDoubleScalarBaseMult(&x, B, &y) q1.ScalarBaseMult(&x) q2.ScalarBaseMult(&y) check.Add(&q1, &q2) checkOnCurve(t, &p, &check, &q1, &q2) return p.Equal(&check) == 1 } if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig(32)); err != nil { t.Error(err) } } // Benchmarks. func BenchmarkScalarBaseMult(b *testing.B) { var p Point for i := 0; i < b.N; i++ { p.ScalarBaseMult(dalekScalar) } } func BenchmarkScalarMult(b *testing.B) { var p Point for i := 0; i < b.N; i++ { p.ScalarMult(dalekScalar, B) } } func BenchmarkVarTimeDoubleScalarBaseMult(b *testing.B) { var p Point for i := 0; i < b.N; i++ { p.VarTimeDoubleScalarBaseMult(dalekScalar, B, dalekScalar) } }
go/src/crypto/internal/edwards25519/scalarmult_test.go/0
{ "file_path": "go/src/crypto/internal/edwards25519/scalarmult_test.go", "repo_id": "go", "token_count": 2354 }
211
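Several tests above (TestScalarMultDistributesOverAdd, TestScalarMultMatchesBaseMult) are property checks driven by testing/quick. A minimal standalone sketch of that pattern on a trivial property, just to show how quick.Check is wired up (the property and MaxCount are arbitrary):

package main

import (
	"fmt"
	"testing/quick"
)

func main() {
	// Property: addition of uint8 values is commutative.
	commutes := func(x, y uint8) bool {
		return x+y == y+x
	}

	// quick.Check generates random arguments and reports the first failing case.
	cfg := &quick.Config{MaxCount: 32}
	if err := quick.Check(commutes, cfg); err != nil {
		fmt.Println("property failed:", err)
		return
	}
	fmt.Println("property held for all generated inputs")
}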
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !purego // This file contains constant-time, 64-bit assembly implementation of // P256. The optimizations performed here are described in detail in: // S.Gueron and V.Krasnov, "Fast prime field elliptic-curve cryptography with // 256-bit primes" // http://link.springer.com/article/10.1007%2Fs13389-014-0090-x // https://eprint.iacr.org/2013/816.pdf #include "textflag.h" #define res_ptr R0 #define a_ptr R1 #define b_ptr R2 #define acc0 R3 #define acc1 R4 #define acc2 R5 #define acc3 R6 #define acc4 R7 #define acc5 R8 #define acc6 R9 #define acc7 R10 #define t0 R11 #define t1 R12 #define t2 R13 #define t3 R14 #define const0 R15 #define const1 R16 #define hlp0 R17 #define hlp1 res_ptr #define x0 R19 #define x1 R20 #define x2 R21 #define x3 R22 #define y0 R23 #define y1 R24 #define y2 R25 #define y3 R26 #define const2 t2 #define const3 t3 DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001 DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551 DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84 DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000 DATA p256one<>+0x00(SB)/8, $0x0000000000000001 DATA p256one<>+0x08(SB)/8, $0xffffffff00000000 DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe GLOBL p256const0<>(SB), 8, $8 GLOBL p256const1<>(SB), 8, $8 GLOBL p256ordK0<>(SB), 8, $8 GLOBL p256ord<>(SB), 8, $32 GLOBL p256one<>(SB), 8, $32 /* ---------------------------------------*/ // func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement) TEXT ·p256OrdLittleToBig(SB),NOSPLIT,$0 JMP ·p256BigToLittle(SB) /* ---------------------------------------*/ // func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte) TEXT ·p256OrdBigToLittle(SB),NOSPLIT,$0 JMP ·p256BigToLittle(SB) /* ---------------------------------------*/ // func p256LittleToBig(res *[32]byte, in *p256Element) TEXT ·p256LittleToBig(SB),NOSPLIT,$0 JMP ·p256BigToLittle(SB) /* ---------------------------------------*/ // func p256BigToLittle(res *p256Element, in *[32]byte) TEXT ·p256BigToLittle(SB),NOSPLIT,$0 MOVD res+0(FP), res_ptr MOVD in+8(FP), a_ptr LDP 0*16(a_ptr), (acc0, acc1) LDP 1*16(a_ptr), (acc2, acc3) REV acc0, acc0 REV acc1, acc1 REV acc2, acc2 REV acc3, acc3 STP (acc3, acc2), 0*16(res_ptr) STP (acc1, acc0), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256MovCond(res, a, b *P256Point, cond int) // If cond == 0 res=b, else res=a TEXT ·p256MovCond(SB),NOSPLIT,$0 MOVD res+0(FP), res_ptr MOVD a+8(FP), a_ptr MOVD b+16(FP), b_ptr MOVD cond+24(FP), R3 CMP $0, R3 // Two remarks: // 1) Will want to revisit NEON, when support is better // 2) CSEL might not be constant time on all ARM processors LDP 0*16(a_ptr), (R4, R5) LDP 1*16(a_ptr), (R6, R7) LDP 2*16(a_ptr), (R8, R9) LDP 0*16(b_ptr), (R16, R17) LDP 1*16(b_ptr), (R19, R20) LDP 2*16(b_ptr), (R21, R22) CSEL EQ, R16, R4, R4 CSEL EQ, R17, R5, R5 CSEL EQ, R19, R6, R6 CSEL EQ, R20, R7, R7 CSEL EQ, R21, R8, R8 CSEL EQ, R22, R9, R9 STP (R4, R5), 0*16(res_ptr) STP (R6, R7), 1*16(res_ptr) STP (R8, R9), 2*16(res_ptr) LDP 3*16(a_ptr), (R4, R5) LDP 4*16(a_ptr), (R6, R7) LDP 5*16(a_ptr), (R8, R9) LDP 3*16(b_ptr), (R16, R17) LDP 4*16(b_ptr), (R19, R20) LDP 5*16(b_ptr), (R21, R22) CSEL EQ, R16, R4, R4 CSEL EQ, R17, R5, R5 CSEL EQ, R19, R6, R6 
CSEL EQ, R20, R7, R7 CSEL EQ, R21, R8, R8 CSEL EQ, R22, R9, R9 STP (R4, R5), 3*16(res_ptr) STP (R6, R7), 4*16(res_ptr) STP (R8, R9), 5*16(res_ptr) RET /* ---------------------------------------*/ // func p256NegCond(val *p256Element, cond int) TEXT ·p256NegCond(SB),NOSPLIT,$0 MOVD val+0(FP), a_ptr MOVD cond+8(FP), hlp0 MOVD a_ptr, res_ptr // acc = poly MOVD $-1, acc0 MOVD p256const0<>(SB), acc1 MOVD $0, acc2 MOVD p256const1<>(SB), acc3 // Load the original value LDP 0*16(a_ptr), (t0, t1) LDP 1*16(a_ptr), (t2, t3) // Speculatively subtract SUBS t0, acc0 SBCS t1, acc1 SBCS t2, acc2 SBC t3, acc3 // If condition is 0, keep original value CMP $0, hlp0 CSEL EQ, t0, acc0, acc0 CSEL EQ, t1, acc1, acc1 CSEL EQ, t2, acc2, acc2 CSEL EQ, t3, acc3, acc3 // Store result STP (acc0, acc1), 0*16(res_ptr) STP (acc2, acc3), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256Sqr(res, in *p256Element, n int) TEXT ·p256Sqr(SB),NOSPLIT,$0 MOVD res+0(FP), res_ptr MOVD in+8(FP), a_ptr MOVD n+16(FP), b_ptr MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 LDP 0*16(a_ptr), (x0, x1) LDP 1*16(a_ptr), (x2, x3) sqrLoop: SUB $1, b_ptr CALL p256SqrInternal<>(SB) MOVD y0, x0 MOVD y1, x1 MOVD y2, x2 MOVD y3, x3 CBNZ b_ptr, sqrLoop STP (y0, y1), 0*16(res_ptr) STP (y2, y3), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256Mul(res, in1, in2 *p256Element) TEXT ·p256Mul(SB),NOSPLIT,$0 MOVD res+0(FP), res_ptr MOVD in1+8(FP), a_ptr MOVD in2+16(FP), b_ptr MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 LDP 0*16(a_ptr), (x0, x1) LDP 1*16(a_ptr), (x2, x3) LDP 0*16(b_ptr), (y0, y1) LDP 1*16(b_ptr), (y2, y3) CALL p256MulInternal<>(SB) STP (y0, y1), 0*16(res_ptr) STP (y2, y3), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256FromMont(res, in *p256Element) TEXT ·p256FromMont(SB),NOSPLIT,$0 MOVD res+0(FP), res_ptr MOVD in+8(FP), a_ptr MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 LDP 0*16(a_ptr), (acc0, acc1) LDP 1*16(a_ptr), (acc2, acc3) // Only reduce, no multiplications are needed // First reduction step ADDS acc0<<32, acc1, acc1 LSR $32, acc0, t0 MUL acc0, const1, t1 UMULH acc0, const1, acc0 ADCS t0, acc2 ADCS t1, acc3 ADC $0, acc0 // Second reduction step ADDS acc1<<32, acc2, acc2 LSR $32, acc1, t0 MUL acc1, const1, t1 UMULH acc1, const1, acc1 ADCS t0, acc3 ADCS t1, acc0 ADC $0, acc1 // Third reduction step ADDS acc2<<32, acc3, acc3 LSR $32, acc2, t0 MUL acc2, const1, t1 UMULH acc2, const1, acc2 ADCS t0, acc0 ADCS t1, acc1 ADC $0, acc2 // Last reduction step ADDS acc3<<32, acc0, acc0 LSR $32, acc3, t0 MUL acc3, const1, t1 UMULH acc3, const1, acc3 ADCS t0, acc1 ADCS t1, acc2 ADC $0, acc3 SUBS $-1, acc0, t0 SBCS const0, acc1, t1 SBCS $0, acc2, t2 SBCS const1, acc3, t3 CSEL CS, t0, acc0, acc0 CSEL CS, t1, acc1, acc1 CSEL CS, t2, acc2, acc2 CSEL CS, t3, acc3, acc3 STP (acc0, acc1), 0*16(res_ptr) STP (acc2, acc3), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256Select(res *P256Point, table *p256Table, idx int) TEXT ·p256Select(SB),NOSPLIT,$0 MOVD idx+16(FP), const0 MOVD table+8(FP), b_ptr MOVD res+0(FP), res_ptr EOR x0, x0, x0 EOR x1, x1, x1 EOR x2, x2, x2 EOR x3, x3, x3 EOR y0, y0, y0 EOR y1, y1, y1 EOR y2, y2, y2 EOR y3, y3, y3 EOR t0, t0, t0 EOR t1, t1, t1 EOR t2, t2, t2 EOR t3, t3, t3 MOVD $0, const1 loop_select: ADD $1, const1 CMP const0, const1 LDP.P 16(b_ptr), (acc0, acc1) CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 LDP.P 16(b_ptr), (acc2, acc3) CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 LDP.P 
16(b_ptr), (acc4, acc5) CSEL EQ, acc4, y0, y0 CSEL EQ, acc5, y1, y1 LDP.P 16(b_ptr), (acc6, acc7) CSEL EQ, acc6, y2, y2 CSEL EQ, acc7, y3, y3 LDP.P 16(b_ptr), (acc0, acc1) CSEL EQ, acc0, t0, t0 CSEL EQ, acc1, t1, t1 LDP.P 16(b_ptr), (acc2, acc3) CSEL EQ, acc2, t2, t2 CSEL EQ, acc3, t3, t3 CMP $16, const1 BNE loop_select STP (x0, x1), 0*16(res_ptr) STP (x2, x3), 1*16(res_ptr) STP (y0, y1), 2*16(res_ptr) STP (y2, y3), 3*16(res_ptr) STP (t0, t1), 4*16(res_ptr) STP (t2, t3), 5*16(res_ptr) RET /* ---------------------------------------*/ // func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int) TEXT ·p256SelectAffine(SB),NOSPLIT,$0 MOVD idx+16(FP), t0 MOVD table+8(FP), t1 MOVD res+0(FP), res_ptr EOR x0, x0, x0 EOR x1, x1, x1 EOR x2, x2, x2 EOR x3, x3, x3 EOR y0, y0, y0 EOR y1, y1, y1 EOR y2, y2, y2 EOR y3, y3, y3 MOVD $0, t2 loop_select: ADD $1, t2 CMP t0, t2 LDP.P 16(t1), (acc0, acc1) CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 LDP.P 16(t1), (acc2, acc3) CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 LDP.P 16(t1), (acc4, acc5) CSEL EQ, acc4, y0, y0 CSEL EQ, acc5, y1, y1 LDP.P 16(t1), (acc6, acc7) CSEL EQ, acc6, y2, y2 CSEL EQ, acc7, y3, y3 CMP $32, t2 BNE loop_select STP (x0, x1), 0*16(res_ptr) STP (x2, x3), 1*16(res_ptr) STP (y0, y1), 2*16(res_ptr) STP (y2, y3), 3*16(res_ptr) RET /* ---------------------------------------*/ // func p256OrdSqr(res, in *p256OrdElement, n int) TEXT ·p256OrdSqr(SB),NOSPLIT,$0 MOVD in+8(FP), a_ptr MOVD n+16(FP), b_ptr MOVD p256ordK0<>(SB), hlp1 LDP p256ord<>+0x00(SB), (const0, const1) LDP p256ord<>+0x10(SB), (const2, const3) LDP 0*16(a_ptr), (x0, x1) LDP 1*16(a_ptr), (x2, x3) ordSqrLoop: SUB $1, b_ptr // x[1:] * x[0] MUL x0, x1, acc1 UMULH x0, x1, acc2 MUL x0, x2, t0 ADDS t0, acc2, acc2 UMULH x0, x2, acc3 MUL x0, x3, t0 ADCS t0, acc3, acc3 UMULH x0, x3, acc4 ADC $0, acc4, acc4 // x[2:] * x[1] MUL x1, x2, t0 ADDS t0, acc3 UMULH x1, x2, t1 ADCS t1, acc4 ADC $0, ZR, acc5 MUL x1, x3, t0 ADDS t0, acc4 UMULH x1, x3, t1 ADC t1, acc5 // x[3] * x[2] MUL x2, x3, t0 ADDS t0, acc5 UMULH x2, x3, acc6 ADC $0, acc6 MOVD $0, acc7 // *2 ADDS acc1, acc1 ADCS acc2, acc2 ADCS acc3, acc3 ADCS acc4, acc4 ADCS acc5, acc5 ADCS acc6, acc6 ADC $0, acc7 // Missing products MUL x0, x0, acc0 UMULH x0, x0, t0 ADDS t0, acc1, acc1 MUL x1, x1, t0 ADCS t0, acc2, acc2 UMULH x1, x1, t1 ADCS t1, acc3, acc3 MUL x2, x2, t0 ADCS t0, acc4, acc4 UMULH x2, x2, t1 ADCS t1, acc5, acc5 MUL x3, x3, t0 ADCS t0, acc6, acc6 UMULH x3, x3, t1 ADC t1, acc7, acc7 // First reduction step MUL acc0, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc0, acc0 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc1, acc1 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc2, acc2 UMULH const2, hlp0, acc0 MUL const3, hlp0, t0 ADCS t0, acc3, acc3 UMULH const3, hlp0, hlp0 ADC $0, hlp0 ADDS t1, acc1, acc1 ADCS y0, acc2, acc2 ADCS acc0, acc3, acc3 ADC $0, hlp0, acc0 // Second reduction step MUL acc1, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc1, acc1 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc2, acc2 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc3, acc3 UMULH const2, hlp0, acc1 MUL const3, hlp0, t0 ADCS t0, acc0, acc0 UMULH const3, hlp0, hlp0 ADC $0, hlp0 ADDS t1, acc2, acc2 ADCS y0, acc3, acc3 ADCS acc1, acc0, acc0 ADC $0, hlp0, acc1 // Third reduction step MUL acc2, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc2, acc2 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc3, acc3 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc0, acc0 UMULH const2, hlp0, acc2 MUL const3, hlp0, 
t0 ADCS t0, acc1, acc1 UMULH const3, hlp0, hlp0 ADC $0, hlp0 ADDS t1, acc3, acc3 ADCS y0, acc0, acc0 ADCS acc2, acc1, acc1 ADC $0, hlp0, acc2 // Last reduction step MUL acc3, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc3, acc3 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc0, acc0 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc1, acc1 UMULH const2, hlp0, acc3 MUL const3, hlp0, t0 ADCS t0, acc2, acc2 UMULH const3, hlp0, hlp0 ADC $0, acc7 ADDS t1, acc0, acc0 ADCS y0, acc1, acc1 ADCS acc3, acc2, acc2 ADC $0, hlp0, acc3 ADDS acc4, acc0, acc0 ADCS acc5, acc1, acc1 ADCS acc6, acc2, acc2 ADCS acc7, acc3, acc3 ADC $0, ZR, acc4 SUBS const0, acc0, y0 SBCS const1, acc1, y1 SBCS const2, acc2, y2 SBCS const3, acc3, y3 SBCS $0, acc4, acc4 CSEL CS, y0, acc0, x0 CSEL CS, y1, acc1, x1 CSEL CS, y2, acc2, x2 CSEL CS, y3, acc3, x3 CBNZ b_ptr, ordSqrLoop MOVD res+0(FP), res_ptr STP (x0, x1), 0*16(res_ptr) STP (x2, x3), 1*16(res_ptr) RET /* ---------------------------------------*/ // func p256OrdMul(res, in1, in2 *p256OrdElement) TEXT ·p256OrdMul(SB),NOSPLIT,$0 MOVD in1+8(FP), a_ptr MOVD in2+16(FP), b_ptr MOVD p256ordK0<>(SB), hlp1 LDP p256ord<>+0x00(SB), (const0, const1) LDP p256ord<>+0x10(SB), (const2, const3) LDP 0*16(a_ptr), (x0, x1) LDP 1*16(a_ptr), (x2, x3) LDP 0*16(b_ptr), (y0, y1) LDP 1*16(b_ptr), (y2, y3) // y[0] * x MUL y0, x0, acc0 UMULH y0, x0, acc1 MUL y0, x1, t0 ADDS t0, acc1 UMULH y0, x1, acc2 MUL y0, x2, t0 ADCS t0, acc2 UMULH y0, x2, acc3 MUL y0, x3, t0 ADCS t0, acc3 UMULH y0, x3, acc4 ADC $0, acc4 // First reduction step MUL acc0, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc0, acc0 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc1, acc1 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc2, acc2 UMULH const2, hlp0, acc0 MUL const3, hlp0, t0 ADCS t0, acc3, acc3 UMULH const3, hlp0, hlp0 ADC $0, acc4 ADDS t1, acc1, acc1 ADCS y0, acc2, acc2 ADCS acc0, acc3, acc3 ADC $0, hlp0, acc0 // y[1] * x MUL y1, x0, t0 ADDS t0, acc1 UMULH y1, x0, t1 MUL y1, x1, t0 ADCS t0, acc2 UMULH y1, x1, hlp0 MUL y1, x2, t0 ADCS t0, acc3 UMULH y1, x2, y0 MUL y1, x3, t0 ADCS t0, acc4 UMULH y1, x3, y1 ADC $0, ZR, acc5 ADDS t1, acc2 ADCS hlp0, acc3 ADCS y0, acc4 ADC y1, acc5 // Second reduction step MUL acc1, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc1, acc1 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc2, acc2 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc3, acc3 UMULH const2, hlp0, acc1 MUL const3, hlp0, t0 ADCS t0, acc0, acc0 UMULH const3, hlp0, hlp0 ADC $0, acc5 ADDS t1, acc2, acc2 ADCS y0, acc3, acc3 ADCS acc1, acc0, acc0 ADC $0, hlp0, acc1 // y[2] * x MUL y2, x0, t0 ADDS t0, acc2 UMULH y2, x0, t1 MUL y2, x1, t0 ADCS t0, acc3 UMULH y2, x1, hlp0 MUL y2, x2, t0 ADCS t0, acc4 UMULH y2, x2, y0 MUL y2, x3, t0 ADCS t0, acc5 UMULH y2, x3, y1 ADC $0, ZR, acc6 ADDS t1, acc3 ADCS hlp0, acc4 ADCS y0, acc5 ADC y1, acc6 // Third reduction step MUL acc2, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc2, acc2 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc3, acc3 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc0, acc0 UMULH const2, hlp0, acc2 MUL const3, hlp0, t0 ADCS t0, acc1, acc1 UMULH const3, hlp0, hlp0 ADC $0, acc6 ADDS t1, acc3, acc3 ADCS y0, acc0, acc0 ADCS acc2, acc1, acc1 ADC $0, hlp0, acc2 // y[3] * x MUL y3, x0, t0 ADDS t0, acc3 UMULH y3, x0, t1 MUL y3, x1, t0 ADCS t0, acc4 UMULH y3, x1, hlp0 MUL y3, x2, t0 ADCS t0, acc5 UMULH y3, x2, y0 MUL y3, x3, t0 ADCS t0, acc6 UMULH y3, x3, y1 ADC $0, ZR, acc7 ADDS t1, acc4 ADCS hlp0, acc5 ADCS y0, acc6 ADC y1, acc7 // 
Last reduction step MUL acc3, hlp1, hlp0 MUL const0, hlp1, t0 ADDS t0, acc3, acc3 UMULH const0, hlp0, t1 MUL const1, hlp0, t0 ADCS t0, acc0, acc0 UMULH const1, hlp0, y0 MUL const2, hlp0, t0 ADCS t0, acc1, acc1 UMULH const2, hlp0, acc3 MUL const3, hlp0, t0 ADCS t0, acc2, acc2 UMULH const3, hlp0, hlp0 ADC $0, acc7 ADDS t1, acc0, acc0 ADCS y0, acc1, acc1 ADCS acc3, acc2, acc2 ADC $0, hlp0, acc3 ADDS acc4, acc0, acc0 ADCS acc5, acc1, acc1 ADCS acc6, acc2, acc2 ADCS acc7, acc3, acc3 ADC $0, ZR, acc4 SUBS const0, acc0, t0 SBCS const1, acc1, t1 SBCS const2, acc2, t2 SBCS const3, acc3, t3 SBCS $0, acc4, acc4 CSEL CS, t0, acc0, acc0 CSEL CS, t1, acc1, acc1 CSEL CS, t2, acc2, acc2 CSEL CS, t3, acc3, acc3 MOVD res+0(FP), res_ptr STP (acc0, acc1), 0*16(res_ptr) STP (acc2, acc3), 1*16(res_ptr) RET /* ---------------------------------------*/ TEXT p256SubInternal<>(SB),NOSPLIT,$0 SUBS x0, y0, acc0 SBCS x1, y1, acc1 SBCS x2, y2, acc2 SBCS x3, y3, acc3 SBC $0, ZR, t0 ADDS $-1, acc0, acc4 ADCS const0, acc1, acc5 ADCS $0, acc2, acc6 ADC const1, acc3, acc7 ANDS $1, t0 CSEL EQ, acc0, acc4, x0 CSEL EQ, acc1, acc5, x1 CSEL EQ, acc2, acc6, x2 CSEL EQ, acc3, acc7, x3 RET /* ---------------------------------------*/ TEXT p256SqrInternal<>(SB),NOSPLIT,$0 // x[1:] * x[0] MUL x0, x1, acc1 UMULH x0, x1, acc2 MUL x0, x2, t0 ADDS t0, acc2, acc2 UMULH x0, x2, acc3 MUL x0, x3, t0 ADCS t0, acc3, acc3 UMULH x0, x3, acc4 ADC $0, acc4, acc4 // x[2:] * x[1] MUL x1, x2, t0 ADDS t0, acc3 UMULH x1, x2, t1 ADCS t1, acc4 ADC $0, ZR, acc5 MUL x1, x3, t0 ADDS t0, acc4 UMULH x1, x3, t1 ADC t1, acc5 // x[3] * x[2] MUL x2, x3, t0 ADDS t0, acc5 UMULH x2, x3, acc6 ADC $0, acc6 MOVD $0, acc7 // *2 ADDS acc1, acc1 ADCS acc2, acc2 ADCS acc3, acc3 ADCS acc4, acc4 ADCS acc5, acc5 ADCS acc6, acc6 ADC $0, acc7 // Missing products MUL x0, x0, acc0 UMULH x0, x0, t0 ADDS t0, acc1, acc1 MUL x1, x1, t0 ADCS t0, acc2, acc2 UMULH x1, x1, t1 ADCS t1, acc3, acc3 MUL x2, x2, t0 ADCS t0, acc4, acc4 UMULH x2, x2, t1 ADCS t1, acc5, acc5 MUL x3, x3, t0 ADCS t0, acc6, acc6 UMULH x3, x3, t1 ADCS t1, acc7, acc7 // First reduction step ADDS acc0<<32, acc1, acc1 LSR $32, acc0, t0 MUL acc0, const1, t1 UMULH acc0, const1, acc0 ADCS t0, acc2, acc2 ADCS t1, acc3, acc3 ADC $0, acc0, acc0 // Second reduction step ADDS acc1<<32, acc2, acc2 LSR $32, acc1, t0 MUL acc1, const1, t1 UMULH acc1, const1, acc1 ADCS t0, acc3, acc3 ADCS t1, acc0, acc0 ADC $0, acc1, acc1 // Third reduction step ADDS acc2<<32, acc3, acc3 LSR $32, acc2, t0 MUL acc2, const1, t1 UMULH acc2, const1, acc2 ADCS t0, acc0, acc0 ADCS t1, acc1, acc1 ADC $0, acc2, acc2 // Last reduction step ADDS acc3<<32, acc0, acc0 LSR $32, acc3, t0 MUL acc3, const1, t1 UMULH acc3, const1, acc3 ADCS t0, acc1, acc1 ADCS t1, acc2, acc2 ADC $0, acc3, acc3 // Add bits [511:256] of the sqr result ADDS acc4, acc0, acc0 ADCS acc5, acc1, acc1 ADCS acc6, acc2, acc2 ADCS acc7, acc3, acc3 ADC $0, ZR, acc4 SUBS $-1, acc0, t0 SBCS const0, acc1, t1 SBCS $0, acc2, t2 SBCS const1, acc3, t3 SBCS $0, acc4, acc4 CSEL CS, t0, acc0, y0 CSEL CS, t1, acc1, y1 CSEL CS, t2, acc2, y2 CSEL CS, t3, acc3, y3 RET /* ---------------------------------------*/ TEXT p256MulInternal<>(SB),NOSPLIT,$0 // y[0] * x MUL y0, x0, acc0 UMULH y0, x0, acc1 MUL y0, x1, t0 ADDS t0, acc1 UMULH y0, x1, acc2 MUL y0, x2, t0 ADCS t0, acc2 UMULH y0, x2, acc3 MUL y0, x3, t0 ADCS t0, acc3 UMULH y0, x3, acc4 ADC $0, acc4 // First reduction step ADDS acc0<<32, acc1, acc1 LSR $32, acc0, t0 MUL acc0, const1, t1 UMULH acc0, const1, acc0 ADCS t0, acc2 ADCS t1, acc3 ADC $0, acc0 // 
y[1] * x MUL y1, x0, t0 ADDS t0, acc1 UMULH y1, x0, t1 MUL y1, x1, t0 ADCS t0, acc2 UMULH y1, x1, t2 MUL y1, x2, t0 ADCS t0, acc3 UMULH y1, x2, t3 MUL y1, x3, t0 ADCS t0, acc4 UMULH y1, x3, hlp0 ADC $0, ZR, acc5 ADDS t1, acc2 ADCS t2, acc3 ADCS t3, acc4 ADC hlp0, acc5 // Second reduction step ADDS acc1<<32, acc2, acc2 LSR $32, acc1, t0 MUL acc1, const1, t1 UMULH acc1, const1, acc1 ADCS t0, acc3 ADCS t1, acc0 ADC $0, acc1 // y[2] * x MUL y2, x0, t0 ADDS t0, acc2 UMULH y2, x0, t1 MUL y2, x1, t0 ADCS t0, acc3 UMULH y2, x1, t2 MUL y2, x2, t0 ADCS t0, acc4 UMULH y2, x2, t3 MUL y2, x3, t0 ADCS t0, acc5 UMULH y2, x3, hlp0 ADC $0, ZR, acc6 ADDS t1, acc3 ADCS t2, acc4 ADCS t3, acc5 ADC hlp0, acc6 // Third reduction step ADDS acc2<<32, acc3, acc3 LSR $32, acc2, t0 MUL acc2, const1, t1 UMULH acc2, const1, acc2 ADCS t0, acc0 ADCS t1, acc1 ADC $0, acc2 // y[3] * x MUL y3, x0, t0 ADDS t0, acc3 UMULH y3, x0, t1 MUL y3, x1, t0 ADCS t0, acc4 UMULH y3, x1, t2 MUL y3, x2, t0 ADCS t0, acc5 UMULH y3, x2, t3 MUL y3, x3, t0 ADCS t0, acc6 UMULH y3, x3, hlp0 ADC $0, ZR, acc7 ADDS t1, acc4 ADCS t2, acc5 ADCS t3, acc6 ADC hlp0, acc7 // Last reduction step ADDS acc3<<32, acc0, acc0 LSR $32, acc3, t0 MUL acc3, const1, t1 UMULH acc3, const1, acc3 ADCS t0, acc1 ADCS t1, acc2 ADC $0, acc3 // Add bits [511:256] of the mul result ADDS acc4, acc0, acc0 ADCS acc5, acc1, acc1 ADCS acc6, acc2, acc2 ADCS acc7, acc3, acc3 ADC $0, ZR, acc4 SUBS $-1, acc0, t0 SBCS const0, acc1, t1 SBCS $0, acc2, t2 SBCS const1, acc3, t3 SBCS $0, acc4, acc4 CSEL CS, t0, acc0, y0 CSEL CS, t1, acc1, y1 CSEL CS, t2, acc2, y2 CSEL CS, t3, acc3, y3 RET /* ---------------------------------------*/ #define p256MulBy2Inline \ ADDS y0, y0, x0; \ ADCS y1, y1, x1; \ ADCS y2, y2, x2; \ ADCS y3, y3, x3; \ ADC $0, ZR, hlp0; \ SUBS $-1, x0, t0; \ SBCS const0, x1, t1;\ SBCS $0, x2, t2; \ SBCS const1, x3, t3;\ SBCS $0, hlp0, hlp0;\ CSEL CC, x0, t0, x0;\ CSEL CC, x1, t1, x1;\ CSEL CC, x2, t2, x2;\ CSEL CC, x3, t3, x3; /* ---------------------------------------*/ #define x1in(off) (off)(a_ptr) #define y1in(off) (off + 32)(a_ptr) #define z1in(off) (off + 64)(a_ptr) #define x2in(off) (off)(b_ptr) #define z2in(off) (off + 64)(b_ptr) #define x3out(off) (off)(res_ptr) #define y3out(off) (off + 32)(res_ptr) #define z3out(off) (off + 64)(res_ptr) #define LDx(src) LDP src(0), (x0, x1); LDP src(16), (x2, x3) #define LDy(src) LDP src(0), (y0, y1); LDP src(16), (y2, y3) #define STx(src) STP (x0, x1), src(0); STP (x2, x3), src(16) #define STy(src) STP (y0, y1), src(0); STP (y2, y3), src(16) /* ---------------------------------------*/ #define y2in(off) (32*0 + 8 + off)(RSP) #define s2(off) (32*1 + 8 + off)(RSP) #define z1sqr(off) (32*2 + 8 + off)(RSP) #define h(off) (32*3 + 8 + off)(RSP) #define r(off) (32*4 + 8 + off)(RSP) #define hsqr(off) (32*5 + 8 + off)(RSP) #define rsqr(off) (32*6 + 8 + off)(RSP) #define hcub(off) (32*7 + 8 + off)(RSP) #define z2sqr(off) (32*8 + 8 + off)(RSP) #define s1(off) (32*9 + 8 + off)(RSP) #define u1(off) (32*10 + 8 + off)(RSP) #define u2(off) (32*11 + 8 + off)(RSP) // func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int) TEXT ·p256PointAddAffineAsm(SB),0,$264-48 MOVD in1+8(FP), a_ptr MOVD in2+16(FP), b_ptr MOVD sign+24(FP), hlp0 MOVD sel+32(FP), hlp1 MOVD zero+40(FP), t2 MOVD $1, t0 CMP $0, t2 CSEL EQ, ZR, t0, t2 CMP $0, hlp1 CSEL EQ, ZR, t0, hlp1 MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 EOR t2<<1, hlp1 // Negate y2in based on sign LDP 2*16(b_ptr), (y0, y1) LDP 3*16(b_ptr), (y2, y3) MOVD $-1, 
acc0 SUBS y0, acc0, acc0 SBCS y1, const0, acc1 SBCS y2, ZR, acc2 SBCS y3, const1, acc3 SBC $0, ZR, t0 ADDS $-1, acc0, acc4 ADCS const0, acc1, acc5 ADCS $0, acc2, acc6 ADCS const1, acc3, acc7 ADC $0, t0, t0 CMP $0, t0 CSEL EQ, acc4, acc0, acc0 CSEL EQ, acc5, acc1, acc1 CSEL EQ, acc6, acc2, acc2 CSEL EQ, acc7, acc3, acc3 // If condition is 0, keep original value CMP $0, hlp0 CSEL EQ, y0, acc0, y0 CSEL EQ, y1, acc1, y1 CSEL EQ, y2, acc2, y2 CSEL EQ, y3, acc3, y3 // Store result STy(y2in) // Begin point add LDx(z1in) CALL p256SqrInternal<>(SB) // z1ˆ2 STy(z1sqr) LDx(x2in) CALL p256MulInternal<>(SB) // x2 * z1ˆ2 LDx(x1in) CALL p256SubInternal<>(SB) // h = u2 - u1 STx(h) LDy(z1in) CALL p256MulInternal<>(SB) // z3 = h * z1 LDP 4*16(a_ptr), (acc0, acc1)// iff select[0] == 0, z3 = z1 LDP 5*16(a_ptr), (acc2, acc3) ANDS $1, hlp1, ZR CSEL EQ, acc0, y0, y0 CSEL EQ, acc1, y1, y1 CSEL EQ, acc2, y2, y2 CSEL EQ, acc3, y3, y3 LDP p256one<>+0x00(SB), (acc0, acc1) LDP p256one<>+0x10(SB), (acc2, acc3) ANDS $2, hlp1, ZR // iff select[1] == 0, z3 = 1 CSEL EQ, acc0, y0, y0 CSEL EQ, acc1, y1, y1 CSEL EQ, acc2, y2, y2 CSEL EQ, acc3, y3, y3 LDx(z1in) MOVD res+0(FP), t0 STP (y0, y1), 4*16(t0) STP (y2, y3), 5*16(t0) LDy(z1sqr) CALL p256MulInternal<>(SB) // z1 ^ 3 LDx(y2in) CALL p256MulInternal<>(SB) // s2 = y2 * z1ˆ3 STy(s2) LDx(y1in) CALL p256SubInternal<>(SB) // r = s2 - s1 STx(r) CALL p256SqrInternal<>(SB) // rsqr = rˆ2 STy (rsqr) LDx(h) CALL p256SqrInternal<>(SB) // hsqr = hˆ2 STy(hsqr) CALL p256MulInternal<>(SB) // hcub = hˆ3 STy(hcub) LDx(y1in) CALL p256MulInternal<>(SB) // y1 * hˆ3 STy(s2) LDP hsqr(0*8), (x0, x1) LDP hsqr(2*8), (x2, x3) LDP 0*16(a_ptr), (y0, y1) LDP 1*16(a_ptr), (y2, y3) CALL p256MulInternal<>(SB) // u1 * hˆ2 STP (y0, y1), h(0*8) STP (y2, y3), h(2*8) p256MulBy2Inline // u1 * hˆ2 * 2, inline LDy(rsqr) CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2 MOVD x0, y0 MOVD x1, y1 MOVD x2, y2 MOVD x3, y3 LDx(hcub) CALL p256SubInternal<>(SB) LDP 0*16(a_ptr), (acc0, acc1) LDP 1*16(a_ptr), (acc2, acc3) ANDS $1, hlp1, ZR // iff select[0] == 0, x3 = x1 CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 LDP 0*16(b_ptr), (acc0, acc1) LDP 1*16(b_ptr), (acc2, acc3) ANDS $2, hlp1, ZR // iff select[1] == 0, x3 = x2 CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 MOVD res+0(FP), t0 STP (x0, x1), 0*16(t0) STP (x2, x3), 1*16(t0) LDP h(0*8), (y0, y1) LDP h(2*8), (y2, y3) CALL p256SubInternal<>(SB) LDP r(0*8), (y0, y1) LDP r(2*8), (y2, y3) CALL p256MulInternal<>(SB) LDP s2(0*8), (x0, x1) LDP s2(2*8), (x2, x3) CALL p256SubInternal<>(SB) LDP 2*16(a_ptr), (acc0, acc1) LDP 3*16(a_ptr), (acc2, acc3) ANDS $1, hlp1, ZR // iff select[0] == 0, y3 = y1 CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 LDP y2in(0*8), (acc0, acc1) LDP y2in(2*8), (acc2, acc3) ANDS $2, hlp1, ZR // iff select[1] == 0, y3 = y2 CSEL EQ, acc0, x0, x0 CSEL EQ, acc1, x1, x1 CSEL EQ, acc2, x2, x2 CSEL EQ, acc3, x3, x3 MOVD res+0(FP), t0 STP (x0, x1), 2*16(t0) STP (x2, x3), 3*16(t0) RET #define p256AddInline \ ADDS y0, x0, x0; \ ADCS y1, x1, x1; \ ADCS y2, x2, x2; \ ADCS y3, x3, x3; \ ADC $0, ZR, hlp0; \ SUBS $-1, x0, t0; \ SBCS const0, x1, t1;\ SBCS $0, x2, t2; \ SBCS const1, x3, t3;\ SBCS $0, hlp0, hlp0;\ CSEL CC, x0, t0, x0;\ CSEL CC, x1, t1, x1;\ CSEL CC, x2, t2, x2;\ CSEL CC, x3, t3, x3; #define s(off) (32*0 + 8 + off)(RSP) #define m(off) (32*1 + 8 + off)(RSP) #define zsqr(off) (32*2 + 8 + off)(RSP) #define tmp(off) (32*3 + 8 + off)(RSP) 
//func p256PointDoubleAsm(res, in *P256Point) TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$136-16 MOVD res+0(FP), res_ptr MOVD in+8(FP), a_ptr MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 // Begin point double LDP 4*16(a_ptr), (x0, x1) LDP 5*16(a_ptr), (x2, x3) CALL p256SqrInternal<>(SB) STP (y0, y1), zsqr(0*8) STP (y2, y3), zsqr(2*8) LDP 0*16(a_ptr), (x0, x1) LDP 1*16(a_ptr), (x2, x3) p256AddInline STx(m) LDx(z1in) LDy(y1in) CALL p256MulInternal<>(SB) p256MulBy2Inline STx(z3out) LDy(x1in) LDx(zsqr) CALL p256SubInternal<>(SB) LDy(m) CALL p256MulInternal<>(SB) // Multiply by 3 p256MulBy2Inline p256AddInline STx(m) LDy(y1in) p256MulBy2Inline CALL p256SqrInternal<>(SB) STy(s) MOVD y0, x0 MOVD y1, x1 MOVD y2, x2 MOVD y3, x3 CALL p256SqrInternal<>(SB) // Divide by 2 ADDS $-1, y0, t0 ADCS const0, y1, t1 ADCS $0, y2, t2 ADCS const1, y3, t3 ADC $0, ZR, hlp0 ANDS $1, y0, ZR CSEL EQ, y0, t0, t0 CSEL EQ, y1, t1, t1 CSEL EQ, y2, t2, t2 CSEL EQ, y3, t3, t3 AND y0, hlp0, hlp0 EXTR $1, t0, t1, y0 EXTR $1, t1, t2, y1 EXTR $1, t2, t3, y2 EXTR $1, t3, hlp0, y3 STy(y3out) LDx(x1in) LDy(s) CALL p256MulInternal<>(SB) STy(s) p256MulBy2Inline STx(tmp) LDx(m) CALL p256SqrInternal<>(SB) LDx(tmp) CALL p256SubInternal<>(SB) STx(x3out) LDy(s) CALL p256SubInternal<>(SB) LDy(m) CALL p256MulInternal<>(SB) LDx(y3out) CALL p256SubInternal<>(SB) STx(y3out) RET /* ---------------------------------------*/ #undef y2in #undef x3out #undef y3out #undef z3out #define y2in(off) (off + 32)(b_ptr) #define x3out(off) (off)(b_ptr) #define y3out(off) (off + 32)(b_ptr) #define z3out(off) (off + 64)(b_ptr) // func p256PointAddAsm(res, in1, in2 *P256Point) int TEXT ·p256PointAddAsm(SB),0,$392-32 // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl // Move input to stack in order to free registers MOVD in1+8(FP), a_ptr MOVD in2+16(FP), b_ptr MOVD p256const0<>(SB), const0 MOVD p256const1<>(SB), const1 // Begin point add LDx(z2in) CALL p256SqrInternal<>(SB) // z2^2 STy(z2sqr) CALL p256MulInternal<>(SB) // z2^3 LDx(y1in) CALL p256MulInternal<>(SB) // s1 = z2ˆ3*y1 STy(s1) LDx(z1in) CALL p256SqrInternal<>(SB) // z1^2 STy(z1sqr) CALL p256MulInternal<>(SB) // z1^3 LDx(y2in) CALL p256MulInternal<>(SB) // s2 = z1ˆ3*y2 LDx(s1) CALL p256SubInternal<>(SB) // r = s2 - s1 STx(r) MOVD $1, t2 ORR x0, x1, t0 // Check if zero mod p256 ORR x2, x3, t1 ORR t1, t0, t0 CMP $0, t0 CSEL EQ, t2, ZR, hlp1 EOR $-1, x0, t0 EOR const0, x1, t1 EOR const1, x3, t3 ORR t0, t1, t0 ORR x2, t3, t1 ORR t1, t0, t0 CMP $0, t0 CSEL EQ, t2, hlp1, hlp1 LDx(z2sqr) LDy(x1in) CALL p256MulInternal<>(SB) // u1 = x1 * z2ˆ2 STy(u1) LDx(z1sqr) LDy(x2in) CALL p256MulInternal<>(SB) // u2 = x2 * z1ˆ2 STy(u2) LDx(u1) CALL p256SubInternal<>(SB) // h = u2 - u1 STx(h) MOVD $1, t2 ORR x0, x1, t0 // Check if zero mod p256 ORR x2, x3, t1 ORR t1, t0, t0 CMP $0, t0 CSEL EQ, t2, ZR, hlp0 EOR $-1, x0, t0 EOR const0, x1, t1 EOR const1, x3, t3 ORR t0, t1, t0 ORR x2, t3, t1 ORR t1, t0, t0 CMP $0, t0 CSEL EQ, t2, hlp0, hlp0 AND hlp0, hlp1, hlp1 LDx(r) CALL p256SqrInternal<>(SB) // rsqr = rˆ2 STy(rsqr) LDx(h) CALL p256SqrInternal<>(SB) // hsqr = hˆ2 STy(hsqr) LDx(h) CALL p256MulInternal<>(SB) // hcub = hˆ3 STy(hcub) LDx(s1) CALL p256MulInternal<>(SB) STy(s2) LDx(z1in) LDy(z2in) CALL p256MulInternal<>(SB) // z1 * z2 LDx(h) CALL p256MulInternal<>(SB) // z1 * z2 * h MOVD res+0(FP), b_ptr STy(z3out) LDx(hsqr) LDy(u1) CALL p256MulInternal<>(SB) // hˆ2 * u1 STy(u2) p256MulBy2Inline // u1 * hˆ2 * 2, inline LDy(rsqr) CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2 MOVD 
x0, y0 MOVD x1, y1 MOVD x2, y2 MOVD x3, y3 LDx(hcub) CALL p256SubInternal<>(SB) STx(x3out) LDy(u2) CALL p256SubInternal<>(SB) LDy(r) CALL p256MulInternal<>(SB) LDx(s2) CALL p256SubInternal<>(SB) STx(y3out) MOVD hlp1, R0 MOVD R0, ret+24(FP) RET
go/src/crypto/internal/nistec/p256_asm_arm64.s/0
{ "file_path": "go/src/crypto/internal/nistec/p256_asm_arm64.s", "repo_id": "go", "token_count": 18204 }
212
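The Montgomery reduction steps in the assembly above (the repeated ADDS accN<<32 / MUL accN, const1 sequences) lean on the special shape of the P-256 prime. The following standalone Go sketch is not part of the assembly: it rebuilds p = 2^256 - 2^224 + 2^192 + 2^96 - 1 and prints its four little-endian 64-bit limbs, which is the layout the code loads as (-1, p256const0, 0, p256const1); the concrete constant values are inferred from that layout rather than quoted from the file.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^256 - 2^224 + 2^192 + 2^96 - 1, the NIST P-256 prime.
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 256)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Add(p, new(big.Int).Lsh(one, 192))
	p.Add(p, new(big.Int).Lsh(one, 96))
	p.Sub(p, one)

	// Print the four 64-bit limbs, least significant first; these are the
	// values the assembly materializes as -1, p256const0, 0, p256const1
	// (names from the assembly, values inferred here).
	mask := new(big.Int).SetUint64(^uint64(0))
	for i := 0; i < 4; i++ {
		limb := new(big.Int).And(new(big.Int).Rsh(p, uint(64*i)), mask)
		fmt.Printf("limb%d = %016x\n", i, limb.Uint64())
	}
}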
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated by go run gen.go -output md5block.go; DO NOT EDIT. package md5 import ( "internal/byteorder" "math/bits" ) func blockGeneric(dig *digest, p []byte) { // load state a, b, c, d := dig.s[0], dig.s[1], dig.s[2], dig.s[3] for i := 0; i <= len(p)-BlockSize; i += BlockSize { // eliminate bounds checks on p q := p[i:] q = q[:BlockSize:BlockSize] // save current state aa, bb, cc, dd := a, b, c, d // load input block x0 := byteorder.LeUint32(q[4*0x0:]) x1 := byteorder.LeUint32(q[4*0x1:]) x2 := byteorder.LeUint32(q[4*0x2:]) x3 := byteorder.LeUint32(q[4*0x3:]) x4 := byteorder.LeUint32(q[4*0x4:]) x5 := byteorder.LeUint32(q[4*0x5:]) x6 := byteorder.LeUint32(q[4*0x6:]) x7 := byteorder.LeUint32(q[4*0x7:]) x8 := byteorder.LeUint32(q[4*0x8:]) x9 := byteorder.LeUint32(q[4*0x9:]) xa := byteorder.LeUint32(q[4*0xa:]) xb := byteorder.LeUint32(q[4*0xb:]) xc := byteorder.LeUint32(q[4*0xc:]) xd := byteorder.LeUint32(q[4*0xd:]) xe := byteorder.LeUint32(q[4*0xe:]) xf := byteorder.LeUint32(q[4*0xf:]) // round 1 a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x0+0xd76aa478, 7) d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x1+0xe8c7b756, 12) c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x2+0x242070db, 17) b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x3+0xc1bdceee, 22) a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x4+0xf57c0faf, 7) d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x5+0x4787c62a, 12) c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x6+0xa8304613, 17) b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x7+0xfd469501, 22) a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x8+0x698098d8, 7) d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x9+0x8b44f7af, 12) c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xa+0xffff5bb1, 17) b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xb+0x895cd7be, 22) a = b + bits.RotateLeft32((((c^d)&b)^d)+a+xc+0x6b901122, 7) d = a + bits.RotateLeft32((((b^c)&a)^c)+d+xd+0xfd987193, 12) c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xe+0xa679438e, 17) b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xf+0x49b40821, 22) // round 2 a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x1+0xf61e2562, 5) d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x6+0xc040b340, 9) c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xb+0x265e5a51, 14) b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x0+0xe9b6c7aa, 20) a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x5+0xd62f105d, 5) d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xa+0x02441453, 9) c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xf+0xd8a1e681, 14) b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x4+0xe7d3fbc8, 20) a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x9+0x21e1cde6, 5) d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xe+0xc33707d6, 9) c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x3+0xf4d50d87, 14) b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x8+0x455a14ed, 20) a = b + bits.RotateLeft32((((b^c)&d)^c)+a+xd+0xa9e3e905, 5) d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x2+0xfcefa3f8, 9) c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x7+0x676f02d9, 14) b = c + bits.RotateLeft32((((c^d)&a)^d)+b+xc+0x8d2a4c8a, 20) // round 3 a = b + bits.RotateLeft32((b^c^d)+a+x5+0xfffa3942, 4) d = a + bits.RotateLeft32((a^b^c)+d+x8+0x8771f681, 11) c = d + bits.RotateLeft32((d^a^b)+c+xb+0x6d9d6122, 16) b = c + bits.RotateLeft32((c^d^a)+b+xe+0xfde5380c, 23) a = b + bits.RotateLeft32((b^c^d)+a+x1+0xa4beea44, 4) d = a + bits.RotateLeft32((a^b^c)+d+x4+0x4bdecfa9, 11) c = d + bits.RotateLeft32((d^a^b)+c+x7+0xf6bb4b60, 16) b = c + 
bits.RotateLeft32((c^d^a)+b+xa+0xbebfbc70, 23) a = b + bits.RotateLeft32((b^c^d)+a+xd+0x289b7ec6, 4) d = a + bits.RotateLeft32((a^b^c)+d+x0+0xeaa127fa, 11) c = d + bits.RotateLeft32((d^a^b)+c+x3+0xd4ef3085, 16) b = c + bits.RotateLeft32((c^d^a)+b+x6+0x04881d05, 23) a = b + bits.RotateLeft32((b^c^d)+a+x9+0xd9d4d039, 4) d = a + bits.RotateLeft32((a^b^c)+d+xc+0xe6db99e5, 11) c = d + bits.RotateLeft32((d^a^b)+c+xf+0x1fa27cf8, 16) b = c + bits.RotateLeft32((c^d^a)+b+x2+0xc4ac5665, 23) // round 4 a = b + bits.RotateLeft32((c^(b|^d))+a+x0+0xf4292244, 6) d = a + bits.RotateLeft32((b^(a|^c))+d+x7+0x432aff97, 10) c = d + bits.RotateLeft32((a^(d|^b))+c+xe+0xab9423a7, 15) b = c + bits.RotateLeft32((d^(c|^a))+b+x5+0xfc93a039, 21) a = b + bits.RotateLeft32((c^(b|^d))+a+xc+0x655b59c3, 6) d = a + bits.RotateLeft32((b^(a|^c))+d+x3+0x8f0ccc92, 10) c = d + bits.RotateLeft32((a^(d|^b))+c+xa+0xffeff47d, 15) b = c + bits.RotateLeft32((d^(c|^a))+b+x1+0x85845dd1, 21) a = b + bits.RotateLeft32((c^(b|^d))+a+x8+0x6fa87e4f, 6) d = a + bits.RotateLeft32((b^(a|^c))+d+xf+0xfe2ce6e0, 10) c = d + bits.RotateLeft32((a^(d|^b))+c+x6+0xa3014314, 15) b = c + bits.RotateLeft32((d^(c|^a))+b+xd+0x4e0811a1, 21) a = b + bits.RotateLeft32((c^(b|^d))+a+x4+0xf7537e82, 6) d = a + bits.RotateLeft32((b^(a|^c))+d+xb+0xbd3af235, 10) c = d + bits.RotateLeft32((a^(d|^b))+c+x2+0x2ad7d2bb, 15) b = c + bits.RotateLeft32((d^(c|^a))+b+x9+0xeb86d391, 21) // add saved state a += aa b += bb c += cc d += dd } // save state dig.s[0], dig.s[1], dig.s[2], dig.s[3] = a, b, c, d }
go/src/crypto/md5/md5block.go/0
{ "file_path": "go/src/crypto/md5/md5block.go", "repo_id": "go", "token_count": 3082 }
213
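Round 1 above computes the MD5 selection function as ((c^d)&b)^d instead of the textbook (b&c)|(^b&d). A tiny standalone Go check of that bit-trick identity (not part of the md5 package):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Verify that ((c^d)&b)^d selects c where b has a 1 bit and d where b
	// has a 0 bit, i.e. it equals F(b,c,d) = (b&c) | (^b&d).
	for i := 0; i < 1000; i++ {
		b, c, d := rand.Uint32(), rand.Uint32(), rand.Uint32()
		fast := ((c ^ d) & b) ^ d
		textbook := (b & c) | (^b & d)
		if fast != textbook {
			fmt.Println("mismatch:", b, c, d)
			return
		}
	}
	fmt.Println("identity holds on 1000 random samples")
}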
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build js && wasm

package rand

import "syscall/js"

// The maximum buffer size for crypto.getRandomValues is 65536 bytes.
// https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues#exceptions
const maxGetRandomRead = 64 << 10

var batchedGetRandom func([]byte) error

func init() {
	Reader = &reader{}
	batchedGetRandom = batched(getRandom, maxGetRandomRead)
}

var jsCrypto = js.Global().Get("crypto")
var uint8Array = js.Global().Get("Uint8Array")

// reader implements a pseudorandom generator
// using JavaScript crypto.getRandomValues method.
// See https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues.
type reader struct{}

func (r *reader) Read(b []byte) (int, error) {
	if err := batchedGetRandom(b); err != nil {
		return 0, err
	}
	return len(b), nil
}

func getRandom(b []byte) error {
	a := uint8Array.New(len(b))
	jsCrypto.Call("getRandomValues", a)
	js.CopyBytesToGo(b, a)
	return nil
}
go/src/crypto/rand/rand_js.go/0
{ "file_path": "go/src/crypto/rand/rand_js.go", "repo_id": "go", "token_count": 374 }
214
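The file above wires Reader to batched(getRandom, maxGetRandomRead), where batched is defined elsewhere in crypto/rand. A hypothetical sketch of such a wrapper, inferred only from the call site, showing the idea of splitting a Read into chunks no larger than the 65536-byte limit of crypto.getRandomValues:

package main

import "fmt"

// batched wraps f so that it is never called with more than max bytes at a
// time. This is a hypothetical reimplementation for illustration; the real
// helper lives in another file of crypto/rand.
func batched(f func([]byte) error, max int) func([]byte) error {
	return func(b []byte) error {
		for len(b) > 0 {
			n := len(b)
			if n > max {
				n = max
			}
			if err := f(b[:n]); err != nil {
				return err
			}
			b = b[n:]
		}
		return nil
	}
}

func main() {
	// Stand-in for getRandom: fill each chunk with a fixed byte so the
	// chunking behavior is easy to observe.
	fill := batched(func(p []byte) error {
		for i := range p {
			p[i] = 0xAB
		}
		return nil
	}, 4)
	buf := make([]byte, 10)
	if err := fill(buf); err != nil {
		panic(err)
	}
	fmt.Println(buf)
}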
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package rsa_test import ( "bytes" "crypto" "crypto/rand" . "crypto/rsa" "crypto/sha1" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/hex" "encoding/pem" "io" "testing" "testing/quick" ) func decodeBase64(in string) []byte { out := make([]byte, base64.StdEncoding.DecodedLen(len(in))) n, err := base64.StdEncoding.Decode(out, []byte(in)) if err != nil { return nil } return out[0:n] } type DecryptPKCS1v15Test struct { in, out string } // These test vectors were generated with `openssl rsautl -pkcs -encrypt` var decryptPKCS1v15Tests = []DecryptPKCS1v15Test{ { "gIcUIoVkD6ATMBk/u/nlCZCCWRKdkfjCgFdo35VpRXLduiKXhNz1XupLLzTXAybEq15juc+EgY5o0DHv/nt3yg==", "x", }, { "Y7TOCSqofGhkRb+jaVRLzK8xw2cSo1IVES19utzv6hwvx+M8kFsoWQm5DzBeJCZTCVDPkTpavUuEbgp8hnUGDw==", "testing.", }, { "arReP9DJtEVyV2Dg3dDp4c/PSk1O6lxkoJ8HcFupoRorBZG+7+1fDAwT1olNddFnQMjmkb8vxwmNMoTAT/BFjQ==", "testing.\n", }, { "WtaBXIoGC54+vH0NH0CHHE+dRDOsMc/6BrfFu2lEqcKL9+uDuWaf+Xj9mrbQCjjZcpQuX733zyok/jsnqe/Ftw==", "01234567890123456789012345678901234567890123456789012", }, } func TestDecryptPKCS1v15(t *testing.T) { decryptionFuncs := []func([]byte) ([]byte, error){ func(ciphertext []byte) (plaintext []byte, err error) { return DecryptPKCS1v15(nil, rsaPrivateKey, ciphertext) }, func(ciphertext []byte) (plaintext []byte, err error) { return rsaPrivateKey.Decrypt(nil, ciphertext, nil) }, } for _, decryptFunc := range decryptionFuncs { for i, test := range decryptPKCS1v15Tests { out, err := decryptFunc(decodeBase64(test.in)) if err != nil { t.Errorf("#%d error decrypting: %v", i, err) } want := []byte(test.out) if !bytes.Equal(out, want) { t.Errorf("#%d got:%#v want:%#v", i, out, want) } } } } func TestEncryptPKCS1v15(t *testing.T) { random := rand.Reader k := (rsaPrivateKey.N.BitLen() + 7) / 8 tryEncryptDecrypt := func(in []byte, blind bool) bool { if len(in) > k-11 { in = in[0 : k-11] } ciphertext, err := EncryptPKCS1v15(random, &rsaPrivateKey.PublicKey, in) if err != nil { t.Errorf("error encrypting: %s", err) return false } var rand io.Reader if !blind { rand = nil } else { rand = random } plaintext, err := DecryptPKCS1v15(rand, rsaPrivateKey, ciphertext) if err != nil { t.Errorf("error decrypting: %s", err) return false } if !bytes.Equal(plaintext, in) { t.Errorf("output mismatch: %#v %#v", plaintext, in) return false } return true } config := new(quick.Config) if testing.Short() { config.MaxCount = 10 } quick.Check(tryEncryptDecrypt, config) } // These test vectors were generated with `openssl rsautl -pkcs -encrypt` var decryptPKCS1v15SessionKeyTests = []DecryptPKCS1v15Test{ { "e6ukkae6Gykq0fKzYwULpZehX+UPXYzMoB5mHQUDEiclRbOTqas4Y0E6nwns1BBpdvEJcilhl5zsox/6DtGsYg==", "1234", }, { "Dtis4uk/q/LQGGqGk97P59K03hkCIVFMEFZRgVWOAAhxgYpCRG0MX2adptt92l67IqMki6iVQyyt0TtX3IdtEw==", "FAIL", }, { "LIyFyCYCptPxrvTxpol8F3M7ZivlMsf53zs0vHRAv+rDIh2YsHS69ePMoPMe3TkOMZ3NupiL3takPxIs1sK+dw==", "abcd", }, { "bafnobel46bKy76JzqU/RIVOH0uAYvzUtauKmIidKgM0sMlvobYVAVQPeUQ/oTGjbIZ1v/6Gyi5AO4DtHruGdw==", "FAIL", }, } func TestEncryptPKCS1v15SessionKey(t *testing.T) { for i, test := range decryptPKCS1v15SessionKeyTests { key := []byte("FAIL") err := DecryptPKCS1v15SessionKey(nil, rsaPrivateKey, decodeBase64(test.in), key) if err != nil { t.Errorf("#%d error decrypting", i) } want := []byte(test.out) if !bytes.Equal(key, want) { t.Errorf("#%d got:%#v want:%#v", i, key, want) } } } func 
TestEncryptPKCS1v15DecrypterSessionKey(t *testing.T) { for i, test := range decryptPKCS1v15SessionKeyTests { plaintext, err := rsaPrivateKey.Decrypt(rand.Reader, decodeBase64(test.in), &PKCS1v15DecryptOptions{SessionKeyLen: 4}) if err != nil { t.Fatalf("#%d: error decrypting: %s", i, err) } if len(plaintext) != 4 { t.Fatalf("#%d: incorrect length plaintext: got %d, want 4", i, len(plaintext)) } if test.out != "FAIL" && !bytes.Equal(plaintext, []byte(test.out)) { t.Errorf("#%d: incorrect plaintext: got %x, want %x", i, plaintext, test.out) } } } func TestNonZeroRandomBytes(t *testing.T) { random := rand.Reader b := make([]byte, 512) err := NonZeroRandomBytes(b, random) if err != nil { t.Errorf("returned error: %s", err) } for _, b := range b { if b == 0 { t.Errorf("Zero octet found") return } } } type signPKCS1v15Test struct { in, out string } // These vectors have been tested with // // `openssl rsautl -verify -inkey pk -in signature | hexdump -C` var signPKCS1v15Tests = []signPKCS1v15Test{ {"Test.\n", "a4f3fa6ea93bcdd0c57be020c1193ecbfd6f200a3d95c409769b029578fa0e336ad9a347600e40d3ae823b8c7e6bad88cc07c1d54c3a1523cbbb6d58efc362ae"}, } func TestSignPKCS1v15(t *testing.T) { for i, test := range signPKCS1v15Tests { h := sha1.New() h.Write([]byte(test.in)) digest := h.Sum(nil) s, err := SignPKCS1v15(nil, rsaPrivateKey, crypto.SHA1, digest) if err != nil { t.Errorf("#%d %s", i, err) } expected, _ := hex.DecodeString(test.out) if !bytes.Equal(s, expected) { t.Errorf("#%d got: %x want: %x", i, s, expected) } } } func TestVerifyPKCS1v15(t *testing.T) { for i, test := range signPKCS1v15Tests { h := sha1.New() h.Write([]byte(test.in)) digest := h.Sum(nil) sig, _ := hex.DecodeString(test.out) err := VerifyPKCS1v15(&rsaPrivateKey.PublicKey, crypto.SHA1, digest, sig) if err != nil { t.Errorf("#%d %s", i, err) } } } func TestOverlongMessagePKCS1v15(t *testing.T) { ciphertext := decodeBase64("fjOVdirUzFoLlukv80dBllMLjXythIf22feqPrNo0YoIjzyzyoMFiLjAc/Y4krkeZ11XFThIrEvw\nkRiZcCq5ng==") _, err := DecryptPKCS1v15(nil, rsaPrivateKey, ciphertext) if err == nil { t.Error("RSA decrypted a message that was too long.") } } func TestUnpaddedSignature(t *testing.T) { msg := []byte("Thu Dec 19 18:06:16 EST 2013\n") // This base64 value was generated with: // % echo Thu Dec 19 18:06:16 EST 2013 > /tmp/msg // % openssl rsautl -sign -inkey key -out /tmp/sig -in /tmp/msg // // Where "key" contains the RSA private key given at the bottom of this // file. expectedSig := decodeBase64("pX4DR8azytjdQ1rtUiC040FjkepuQut5q2ZFX1pTjBrOVKNjgsCDyiJDGZTCNoh9qpXYbhl7iEym30BWWwuiZg==") sig, err := SignPKCS1v15(nil, rsaPrivateKey, crypto.Hash(0), msg) if err != nil { t.Fatalf("SignPKCS1v15 failed: %s", err) } if !bytes.Equal(sig, expectedSig) { t.Fatalf("signature is not expected value: got %x, want %x", sig, expectedSig) } if err := VerifyPKCS1v15(&rsaPrivateKey.PublicKey, crypto.Hash(0), msg, sig); err != nil { t.Fatalf("signature failed to verify: %s", err) } } func TestShortSessionKey(t *testing.T) { // This tests that attempting to decrypt a session key where the // ciphertext is too small doesn't run outside the array bounds. 
ciphertext, err := EncryptPKCS1v15(rand.Reader, &rsaPrivateKey.PublicKey, []byte{1}) if err != nil { t.Fatalf("Failed to encrypt short message: %s", err) } var key [32]byte if err := DecryptPKCS1v15SessionKey(nil, rsaPrivateKey, ciphertext, key[:]); err != nil { t.Fatalf("Failed to decrypt short message: %s", err) } for _, v := range key { if v != 0 { t.Fatal("key was modified when ciphertext was invalid") } } } var rsaPrivateKey = parseKey(testingKey(`-----BEGIN RSA TESTING KEY----- MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0 fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu /ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/ EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V -----END RSA TESTING KEY-----`)) func parsePublicKey(s string) *PublicKey { p, _ := pem.Decode([]byte(s)) k, err := x509.ParsePKCS1PublicKey(p.Bytes) if err != nil { panic(err) } return k } func TestShortPKCS1v15Signature(t *testing.T) { pub := parsePublicKey(`-----BEGIN RSA PUBLIC KEY----- MEgCQQCd9BVzo775lkohasxjnefF1nCMcNoibqIWEVDe/K7M2GSoO4zlSQB+gkix O3AnTcdHB51iaZpWfxPSnew8yfulAgMBAAE= -----END RSA PUBLIC KEY-----`) sig, err := hex.DecodeString("193a310d0dcf64094c6e3a00c8219b80ded70535473acff72c08e1222974bb24a93a535b1dc4c59fc0e65775df7ba2007dd20e9193f4c4025a18a7070aee93") if err != nil { t.Fatalf("failed to decode signature: %s", err) } h := sha256.Sum256([]byte("hello")) err = VerifyPKCS1v15(pub, crypto.SHA256, h[:], sig) if err == nil { t.Fatal("VerifyPKCS1v15 accepted a truncated signature") } }
go/src/crypto/rsa/pkcs1v15_test.go/0
{ "file_path": "go/src/crypto/rsa/pkcs1v15_test.go", "repo_id": "go", "token_count": 4254 }
215
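The tests above exercise the exported PKCS #1 v1.5 functions against fixed vectors. A minimal standalone usage sketch of the same public API (key size and message are arbitrary choices for illustration):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// Generate a throwaway key, then round-trip a short message through
	// EncryptPKCS1v15 / DecryptPKCS1v15.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	msg := []byte("testing.")
	ct, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, msg)
	if err != nil {
		panic(err)
	}
	pt, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ct)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round trip ok: %q\n", pt)
}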
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // // ARM version of md5block.go //go:build !purego #include "textflag.h" // SHA-1 block routine. See sha1block.go for Go equivalent. // // There are 80 rounds of 4 types: // - rounds 0-15 are type 1 and load data (ROUND1 macro). // - rounds 16-19 are type 1 and do not load data (ROUND1x macro). // - rounds 20-39 are type 2 and do not load data (ROUND2 macro). // - rounds 40-59 are type 3 and do not load data (ROUND3 macro). // - rounds 60-79 are type 4 and do not load data (ROUND4 macro). // // Each round loads or shuffles the data, then computes a per-round // function of b, c, d, and then mixes the result into and rotates the // five registers a, b, c, d, e holding the intermediate results. // // The register rotation is implemented by rotating the arguments to // the round macros instead of by explicit move instructions. // Register definitions #define Rdata R0 // Pointer to incoming data #define Rconst R1 // Current constant for SHA round #define Ra R2 // SHA-1 accumulator #define Rb R3 // SHA-1 accumulator #define Rc R4 // SHA-1 accumulator #define Rd R5 // SHA-1 accumulator #define Re R6 // SHA-1 accumulator #define Rt0 R7 // Temporary #define Rt1 R8 // Temporary // r9, r10 are forbidden // r11 is OK provided you check the assembler that no synthetic instructions use it #define Rt2 R11 // Temporary #define Rctr R12 // loop counter #define Rw R14 // point to w buffer // func block(dig *digest, p []byte) // 0(FP) is *digest // 4(FP) is p.array (struct Slice) // 8(FP) is p.len //12(FP) is p.cap // // Stack frame #define p_end end-4(SP) // pointer to the end of data #define p_data data-8(SP) // current data pointer (unused?) #define w_buf buf-(8+4*80)(SP) //80 words temporary buffer w uint32[80] #define saved abcde-(8+4*80+4*5)(SP) // saved sha1 registers a,b,c,d,e - these must be last (unused?) 
// Total size +4 for saved LR is 352 // w[i] = p[j]<<24 | p[j+1]<<16 | p[j+2]<<8 | p[j+3] // e += w[i] #define LOAD(Re) \ MOVBU 2(Rdata), Rt0 ; \ MOVBU 3(Rdata), Rt1 ; \ MOVBU 1(Rdata), Rt2 ; \ ORR Rt0<<8, Rt1, Rt0 ; \ MOVBU.P 4(Rdata), Rt1 ; \ ORR Rt2<<16, Rt0, Rt0 ; \ ORR Rt1<<24, Rt0, Rt0 ; \ MOVW.P Rt0, 4(Rw) ; \ ADD Rt0, Re, Re // tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] // w[i&0xf] = tmp<<1 | tmp>>(32-1) // e += w[i&0xf] #define SHUFFLE(Re) \ MOVW (-16*4)(Rw), Rt0 ; \ MOVW (-14*4)(Rw), Rt1 ; \ MOVW (-8*4)(Rw), Rt2 ; \ EOR Rt0, Rt1, Rt0 ; \ MOVW (-3*4)(Rw), Rt1 ; \ EOR Rt2, Rt0, Rt0 ; \ EOR Rt0, Rt1, Rt0 ; \ MOVW Rt0@>(32-1), Rt0 ; \ MOVW.P Rt0, 4(Rw) ; \ ADD Rt0, Re, Re // t1 = (b & c) | ((~b) & d) #define FUNC1(Ra, Rb, Rc, Rd, Re) \ MVN Rb, Rt1 ; \ AND Rb, Rc, Rt0 ; \ AND Rd, Rt1, Rt1 ; \ ORR Rt0, Rt1, Rt1 // t1 = b ^ c ^ d #define FUNC2(Ra, Rb, Rc, Rd, Re) \ EOR Rb, Rc, Rt1 ; \ EOR Rd, Rt1, Rt1 // t1 = (b & c) | (b & d) | (c & d) = // t1 = (b & c) | ((b | c) & d) #define FUNC3(Ra, Rb, Rc, Rd, Re) \ ORR Rb, Rc, Rt0 ; \ AND Rb, Rc, Rt1 ; \ AND Rd, Rt0, Rt0 ; \ ORR Rt0, Rt1, Rt1 #define FUNC4 FUNC2 // a5 := a<<5 | a>>(32-5) // b = b<<30 | b>>(32-30) // e = a5 + t1 + e + const #define MIX(Ra, Rb, Rc, Rd, Re) \ ADD Rt1, Re, Re ; \ MOVW Rb@>(32-30), Rb ; \ ADD Ra@>(32-5), Re, Re ; \ ADD Rconst, Re, Re #define ROUND1(Ra, Rb, Rc, Rd, Re) \ LOAD(Re) ; \ FUNC1(Ra, Rb, Rc, Rd, Re) ; \ MIX(Ra, Rb, Rc, Rd, Re) #define ROUND1x(Ra, Rb, Rc, Rd, Re) \ SHUFFLE(Re) ; \ FUNC1(Ra, Rb, Rc, Rd, Re) ; \ MIX(Ra, Rb, Rc, Rd, Re) #define ROUND2(Ra, Rb, Rc, Rd, Re) \ SHUFFLE(Re) ; \ FUNC2(Ra, Rb, Rc, Rd, Re) ; \ MIX(Ra, Rb, Rc, Rd, Re) #define ROUND3(Ra, Rb, Rc, Rd, Re) \ SHUFFLE(Re) ; \ FUNC3(Ra, Rb, Rc, Rd, Re) ; \ MIX(Ra, Rb, Rc, Rd, Re) #define ROUND4(Ra, Rb, Rc, Rd, Re) \ SHUFFLE(Re) ; \ FUNC4(Ra, Rb, Rc, Rd, Re) ; \ MIX(Ra, Rb, Rc, Rd, Re) // func block(dig *digest, p []byte) TEXT ·block(SB), 0, $352-16 MOVW p+4(FP), Rdata // pointer to the data MOVW p_len+8(FP), Rt0 // number of bytes ADD Rdata, Rt0 MOVW Rt0, p_end // pointer to end of data // Load up initial SHA-1 accumulator MOVW dig+0(FP), Rt0 MOVM.IA (Rt0), [Ra,Rb,Rc,Rd,Re] loop: // Save registers at SP+4 onwards MOVM.IB [Ra,Rb,Rc,Rd,Re], (R13) MOVW $w_buf, Rw MOVW $0x5A827999, Rconst MOVW $3, Rctr loop1: ROUND1(Ra, Rb, Rc, Rd, Re) ROUND1(Re, Ra, Rb, Rc, Rd) ROUND1(Rd, Re, Ra, Rb, Rc) ROUND1(Rc, Rd, Re, Ra, Rb) ROUND1(Rb, Rc, Rd, Re, Ra) SUB.S $1, Rctr BNE loop1 ROUND1(Ra, Rb, Rc, Rd, Re) ROUND1x(Re, Ra, Rb, Rc, Rd) ROUND1x(Rd, Re, Ra, Rb, Rc) ROUND1x(Rc, Rd, Re, Ra, Rb) ROUND1x(Rb, Rc, Rd, Re, Ra) MOVW $0x6ED9EBA1, Rconst MOVW $4, Rctr loop2: ROUND2(Ra, Rb, Rc, Rd, Re) ROUND2(Re, Ra, Rb, Rc, Rd) ROUND2(Rd, Re, Ra, Rb, Rc) ROUND2(Rc, Rd, Re, Ra, Rb) ROUND2(Rb, Rc, Rd, Re, Ra) SUB.S $1, Rctr BNE loop2 MOVW $0x8F1BBCDC, Rconst MOVW $4, Rctr loop3: ROUND3(Ra, Rb, Rc, Rd, Re) ROUND3(Re, Ra, Rb, Rc, Rd) ROUND3(Rd, Re, Ra, Rb, Rc) ROUND3(Rc, Rd, Re, Ra, Rb) ROUND3(Rb, Rc, Rd, Re, Ra) SUB.S $1, Rctr BNE loop3 MOVW $0xCA62C1D6, Rconst MOVW $4, Rctr loop4: ROUND4(Ra, Rb, Rc, Rd, Re) ROUND4(Re, Ra, Rb, Rc, Rd) ROUND4(Rd, Re, Ra, Rb, Rc) ROUND4(Rc, Rd, Re, Ra, Rb) ROUND4(Rb, Rc, Rd, Re, Ra) SUB.S $1, Rctr BNE loop4 // Accumulate - restoring registers from SP+4 MOVM.IB (R13), [Rt0,Rt1,Rt2,Rctr,Rw] ADD Rt0, Ra ADD Rt1, Rb ADD Rt2, Rc ADD Rctr, Rd ADD Rw, Re MOVW p_end, Rt0 CMP Rt0, Rdata BLO loop // Save final SHA-1 accumulator MOVW dig+0(FP), Rt0 MOVM.IA [Ra,Rb,Rc,Rd,Re], (Rt0) RET
go/src/crypto/sha1/sha1block_arm.s/0
{ "file_path": "go/src/crypto/sha1/sha1block_arm.s", "repo_id": "go", "token_count": 3029 }
216
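The assembly above follows the standard FIPS 180-4 SHA-1 structure described in its header comment: a rotate-by-one message schedule and 80 rounds of four types. As an illustration of that structure only — this is not the package's own Go fallback — here is a minimal single-block SHA-1 in Go that hashes "abc" and cross-checks the result against crypto/sha1:

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"math/bits"
)

// block processes one padded 64-byte chunk, mirroring the four round types
// (ROUND1/ROUND2/ROUND3/ROUND4) and the SHUFFLE schedule in the assembly.
func block(h *[5]uint32, p []byte) {
	var w [80]uint32
	for i := 0; i < 16; i++ {
		w[i] = binary.BigEndian.Uint32(p[4*i:])
	}
	for i := 16; i < 80; i++ {
		w[i] = bits.RotateLeft32(w[i-3]^w[i-8]^w[i-14]^w[i-16], 1)
	}
	a, b, c, d, e := h[0], h[1], h[2], h[3], h[4]
	for i := 0; i < 80; i++ {
		var f, k uint32
		switch {
		case i < 20:
			f, k = (b&c)|(^b&d), 0x5A827999
		case i < 40:
			f, k = b^c^d, 0x6ED9EBA1
		case i < 60:
			f, k = (b&c)|(b&d)|(c&d), 0x8F1BBCDC
		default:
			f, k = b^c^d, 0xCA62C1D6
		}
		t := bits.RotateLeft32(a, 5) + f + e + k + w[i]
		e, d, c, b, a = d, c, bits.RotateLeft32(b, 30), a, t
	}
	h[0] += a
	h[1] += b
	h[2] += c
	h[3] += d
	h[4] += e
}

func main() {
	msg := []byte("abc")
	// Pad "abc" into a single 64-byte block: 0x80, zeros, 64-bit bit length.
	var chunk [64]byte
	copy(chunk[:], msg)
	chunk[len(msg)] = 0x80
	binary.BigEndian.PutUint64(chunk[56:], uint64(len(msg))*8)

	h := [5]uint32{0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0}
	block(&h, chunk[:])

	var digest [20]byte
	for i, v := range h {
		binary.BigEndian.PutUint32(digest[4*i:], v)
	}
	fmt.Printf("sketch:      %x\n", digest)
	fmt.Printf("crypto/sha1: %x\n", sha1.Sum(msg))
}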
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !purego #include "textflag.h" #define HASHUPDATE \ SHA256H V9.S4, V3, V2 \ SHA256H2 V9.S4, V8, V3 \ VMOV V2.B16, V8.B16 // func sha256block(h []uint32, p []byte, k []uint32) TEXT ·sha256block(SB),NOSPLIT,$0 MOVD h_base+0(FP), R0 // Hash value first address MOVD p_base+24(FP), R1 // message first address MOVD k_base+48(FP), R2 // k constants first address MOVD p_len+32(FP), R3 // message length VLD1 (R0), [V0.S4, V1.S4] // load h(a,b,c,d,e,f,g,h) VLD1.P 64(R2), [V16.S4, V17.S4, V18.S4, V19.S4] VLD1.P 64(R2), [V20.S4, V21.S4, V22.S4, V23.S4] VLD1.P 64(R2), [V24.S4, V25.S4, V26.S4, V27.S4] VLD1 (R2), [V28.S4, V29.S4, V30.S4, V31.S4] //load 64*4bytes K constant(K0-K63) blockloop: VLD1.P 16(R1), [V4.B16] // load 16bytes message VLD1.P 16(R1), [V5.B16] // load 16bytes message VLD1.P 16(R1), [V6.B16] // load 16bytes message VLD1.P 16(R1), [V7.B16] // load 16bytes message VMOV V0.B16, V2.B16 // backup: VO h(dcba) VMOV V1.B16, V3.B16 // backup: V1 h(hgfe) VMOV V2.B16, V8.B16 VREV32 V4.B16, V4.B16 // prepare for using message in Byte format VREV32 V5.B16, V5.B16 VREV32 V6.B16, V6.B16 VREV32 V7.B16, V7.B16 VADD V16.S4, V4.S4, V9.S4 // V18(W0+K0...W3+K3) SHA256SU0 V5.S4, V4.S4 // V4: (su0(W1)+W0,...,su0(W4)+W3) HASHUPDATE // H4 VADD V17.S4, V5.S4, V9.S4 // V18(W4+K4...W7+K7) SHA256SU0 V6.S4, V5.S4 // V5: (su0(W5)+W4,...,su0(W8)+W7) SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W16-W19 HASHUPDATE // H8 VADD V18.S4, V6.S4, V9.S4 // V18(W8+K8...W11+K11) SHA256SU0 V7.S4, V6.S4 // V6: (su0(W9)+W8,...,su0(W12)+W11) SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W20-W23 HASHUPDATE // H12 VADD V19.S4, V7.S4, V9.S4 // V18(W12+K12...W15+K15) SHA256SU0 V4.S4, V7.S4 // V7: (su0(W13)+W12,...,su0(W16)+W15) SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W24-W27 HASHUPDATE // H16 VADD V20.S4, V4.S4, V9.S4 // V18(W16+K16...W19+K19) SHA256SU0 V5.S4, V4.S4 // V4: (su0(W17)+W16,...,su0(W20)+W19) SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W28-W31 HASHUPDATE // H20 VADD V21.S4, V5.S4, V9.S4 // V18(W20+K20...W23+K23) SHA256SU0 V6.S4, V5.S4 // V5: (su0(W21)+W20,...,su0(W24)+W23) SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W32-W35 HASHUPDATE // H24 VADD V22.S4, V6.S4, V9.S4 // V18(W24+K24...W27+K27) SHA256SU0 V7.S4, V6.S4 // V6: (su0(W25)+W24,...,su0(W28)+W27) SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W36-W39 HASHUPDATE // H28 VADD V23.S4, V7.S4, V9.S4 // V18(W28+K28...W31+K31) SHA256SU0 V4.S4, V7.S4 // V7: (su0(W29)+W28,...,su0(W32)+W31) SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W40-W43 HASHUPDATE // H32 VADD V24.S4, V4.S4, V9.S4 // V18(W32+K32...W35+K35) SHA256SU0 V5.S4, V4.S4 // V4: (su0(W33)+W32,...,su0(W36)+W35) SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W44-W47 HASHUPDATE // H36 VADD V25.S4, V5.S4, V9.S4 // V18(W36+K36...W39+K39) SHA256SU0 V6.S4, V5.S4 // V5: (su0(W37)+W36,...,su0(W40)+W39) SHA256SU1 V7.S4, V6.S4, V4.S4 // V4: W48-W51 HASHUPDATE // H40 VADD V26.S4, V6.S4, V9.S4 // V18(W40+K40...W43+K43) SHA256SU0 V7.S4, V6.S4 // V6: (su0(W41)+W40,...,su0(W44)+W43) SHA256SU1 V4.S4, V7.S4, V5.S4 // V5: W52-W55 HASHUPDATE // H44 VADD V27.S4, V7.S4, V9.S4 // V18(W44+K44...W47+K47) SHA256SU0 V4.S4, V7.S4 // V7: (su0(W45)+W44,...,su0(W48)+W47) SHA256SU1 V5.S4, V4.S4, V6.S4 // V6: W56-W59 HASHUPDATE // H48 VADD V28.S4, V4.S4, V9.S4 // V18(W48+K48,...,W51+K51) HASHUPDATE // H52 SHA256SU1 V6.S4, V5.S4, V7.S4 // V7: W60-W63 VADD V29.S4, V5.S4, V9.S4 // V18(W52+K52,...,W55+K55) HASHUPDATE // H56 VADD V30.S4, V6.S4, V9.S4 
// V18(W59+K59,...,W59+K59) HASHUPDATE // H60 VADD V31.S4, V7.S4, V9.S4 // V18(W60+K60,...,W63+K63) HASHUPDATE // H64 SUB $64, R3, R3 // message length - 64bytes, then compare with 64bytes VADD V2.S4, V0.S4, V0.S4 VADD V3.S4, V1.S4, V1.S4 CBNZ R3, blockloop sha256ret: VST1 [V0.S4, V1.S4], (R0) // store hash value H RET
go/src/crypto/sha256/sha256block_arm64.s/0
{ "file_path": "go/src/crypto/sha256/sha256block_arm64.s", "repo_id": "go", "token_count": 4326 }
217
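The SHA256SU0/SHA256SU1 instructions above compute the SHA-256 message-schedule expansion four words at a time. A scalar Go sketch of the same expansion, using the small-sigma functions from FIPS 180-4 and placeholder message words, for illustration only:

package main

import (
	"fmt"
	"math/bits"
)

// sigma0 and sigma1 are the SHA-256 message-schedule functions
// (rotate-right amounts 7/18 with shift 3, and 17/19 with shift 10).
func sigma0(x uint32) uint32 {
	return bits.RotateLeft32(x, -7) ^ bits.RotateLeft32(x, -18) ^ (x >> 3)
}

func sigma1(x uint32) uint32 {
	return bits.RotateLeft32(x, -17) ^ bits.RotateLeft32(x, -19) ^ (x >> 10)
}

func main() {
	// Expand 16 message words W0..W15 into the full 64-word schedule,
	// the job SHA256SU0/SHA256SU1 perform four words per instruction pair.
	var w [64]uint32
	for i := 0; i < 16; i++ {
		w[i] = uint32(i) // placeholder message words
	}
	for i := 16; i < 64; i++ {
		w[i] = w[i-16] + sigma0(w[i-15]) + w[i-7] + sigma1(w[i-2])
	}
	fmt.Println("W16..W19:", w[16:20])
}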
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on CRYPTOGAMS code with the following comment: // # ==================================================================== // # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // # project. The module is, however, dual licensed under OpenSSL and // # CRYPTOGAMS licenses depending on where you obtain it. For further // # details see http://www.openssl.org/~appro/cryptogams/. // # ==================================================================== //go:build (ppc64 || ppc64le) && !purego #include "textflag.h" // SHA512 block routine. See sha512block.go for Go equivalent. // // The algorithm is detailed in FIPS 180-4: // // https://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf // // Wt = Mt; for 0 <= t <= 15 // Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 // // a = H0 // b = H1 // c = H2 // d = H3 // e = H4 // f = H5 // g = H6 // h = H7 // // for t = 0 to 79 { // T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt // T2 = BIGSIGMA0(a) + Maj(a,b,c) // h = g // g = f // f = e // e = d + T1 // d = c // c = b // b = a // a = T1 + T2 // } // // H0 = a + H0 // H1 = b + H1 // H2 = c + H2 // H3 = d + H3 // H4 = e + H4 // H5 = f + H5 // H6 = g + H6 // H7 = h + H7 #define CTX R3 #define INP R4 #define END R5 #define TBL R6 #define CNT R8 #define LEN R9 #define TEMP R12 #define TBL_STRT R7 // Pointer to start of kcon table. #define R_x000 R0 #define R_x010 R10 #define R_x020 R25 #define R_x030 R26 #define R_x040 R14 #define R_x050 R15 #define R_x060 R16 #define R_x070 R17 #define R_x080 R18 #define R_x090 R19 #define R_x0a0 R20 #define R_x0b0 R21 #define R_x0c0 R22 #define R_x0d0 R23 #define R_x0e0 R24 #define R_x0f0 R28 #define R_x100 R29 #define R_x110 R27 // V0-V7 are A-H // V8-V23 are used for the message schedule #define KI V24 #define FUNC V25 #define S0 V26 #define S1 V27 #define s0 V28 #define s1 V29 #define LEMASK V31 // Permutation control register for little endian // VPERM is needed on LE to switch the bytes #ifdef GOARCH_ppc64le #define VPERMLE(va,vb,vc,vt) VPERM va, vb, vc, vt #else #define VPERMLE(va,vb,vc,vt) #endif // 2 copies of each Kt, to fill both doublewords of a vector register DATA ·kcon+0x000(SB)/8, $0x428a2f98d728ae22 DATA ·kcon+0x008(SB)/8, $0x428a2f98d728ae22 DATA ·kcon+0x010(SB)/8, $0x7137449123ef65cd DATA ·kcon+0x018(SB)/8, $0x7137449123ef65cd DATA ·kcon+0x020(SB)/8, $0xb5c0fbcfec4d3b2f DATA ·kcon+0x028(SB)/8, $0xb5c0fbcfec4d3b2f DATA ·kcon+0x030(SB)/8, $0xe9b5dba58189dbbc DATA ·kcon+0x038(SB)/8, $0xe9b5dba58189dbbc DATA ·kcon+0x040(SB)/8, $0x3956c25bf348b538 DATA ·kcon+0x048(SB)/8, $0x3956c25bf348b538 DATA ·kcon+0x050(SB)/8, $0x59f111f1b605d019 DATA ·kcon+0x058(SB)/8, $0x59f111f1b605d019 DATA ·kcon+0x060(SB)/8, $0x923f82a4af194f9b DATA ·kcon+0x068(SB)/8, $0x923f82a4af194f9b DATA ·kcon+0x070(SB)/8, $0xab1c5ed5da6d8118 DATA ·kcon+0x078(SB)/8, $0xab1c5ed5da6d8118 DATA ·kcon+0x080(SB)/8, $0xd807aa98a3030242 DATA ·kcon+0x088(SB)/8, $0xd807aa98a3030242 DATA ·kcon+0x090(SB)/8, $0x12835b0145706fbe DATA ·kcon+0x098(SB)/8, $0x12835b0145706fbe DATA ·kcon+0x0A0(SB)/8, $0x243185be4ee4b28c DATA ·kcon+0x0A8(SB)/8, $0x243185be4ee4b28c DATA ·kcon+0x0B0(SB)/8, $0x550c7dc3d5ffb4e2 DATA ·kcon+0x0B8(SB)/8, $0x550c7dc3d5ffb4e2 DATA ·kcon+0x0C0(SB)/8, $0x72be5d74f27b896f DATA ·kcon+0x0C8(SB)/8, $0x72be5d74f27b896f DATA ·kcon+0x0D0(SB)/8, $0x80deb1fe3b1696b1 DATA ·kcon+0x0D8(SB)/8, $0x80deb1fe3b1696b1 
DATA ·kcon+0x0E0(SB)/8, $0x9bdc06a725c71235 DATA ·kcon+0x0E8(SB)/8, $0x9bdc06a725c71235 DATA ·kcon+0x0F0(SB)/8, $0xc19bf174cf692694 DATA ·kcon+0x0F8(SB)/8, $0xc19bf174cf692694 DATA ·kcon+0x100(SB)/8, $0xe49b69c19ef14ad2 DATA ·kcon+0x108(SB)/8, $0xe49b69c19ef14ad2 DATA ·kcon+0x110(SB)/8, $0xefbe4786384f25e3 DATA ·kcon+0x118(SB)/8, $0xefbe4786384f25e3 DATA ·kcon+0x120(SB)/8, $0x0fc19dc68b8cd5b5 DATA ·kcon+0x128(SB)/8, $0x0fc19dc68b8cd5b5 DATA ·kcon+0x130(SB)/8, $0x240ca1cc77ac9c65 DATA ·kcon+0x138(SB)/8, $0x240ca1cc77ac9c65 DATA ·kcon+0x140(SB)/8, $0x2de92c6f592b0275 DATA ·kcon+0x148(SB)/8, $0x2de92c6f592b0275 DATA ·kcon+0x150(SB)/8, $0x4a7484aa6ea6e483 DATA ·kcon+0x158(SB)/8, $0x4a7484aa6ea6e483 DATA ·kcon+0x160(SB)/8, $0x5cb0a9dcbd41fbd4 DATA ·kcon+0x168(SB)/8, $0x5cb0a9dcbd41fbd4 DATA ·kcon+0x170(SB)/8, $0x76f988da831153b5 DATA ·kcon+0x178(SB)/8, $0x76f988da831153b5 DATA ·kcon+0x180(SB)/8, $0x983e5152ee66dfab DATA ·kcon+0x188(SB)/8, $0x983e5152ee66dfab DATA ·kcon+0x190(SB)/8, $0xa831c66d2db43210 DATA ·kcon+0x198(SB)/8, $0xa831c66d2db43210 DATA ·kcon+0x1A0(SB)/8, $0xb00327c898fb213f DATA ·kcon+0x1A8(SB)/8, $0xb00327c898fb213f DATA ·kcon+0x1B0(SB)/8, $0xbf597fc7beef0ee4 DATA ·kcon+0x1B8(SB)/8, $0xbf597fc7beef0ee4 DATA ·kcon+0x1C0(SB)/8, $0xc6e00bf33da88fc2 DATA ·kcon+0x1C8(SB)/8, $0xc6e00bf33da88fc2 DATA ·kcon+0x1D0(SB)/8, $0xd5a79147930aa725 DATA ·kcon+0x1D8(SB)/8, $0xd5a79147930aa725 DATA ·kcon+0x1E0(SB)/8, $0x06ca6351e003826f DATA ·kcon+0x1E8(SB)/8, $0x06ca6351e003826f DATA ·kcon+0x1F0(SB)/8, $0x142929670a0e6e70 DATA ·kcon+0x1F8(SB)/8, $0x142929670a0e6e70 DATA ·kcon+0x200(SB)/8, $0x27b70a8546d22ffc DATA ·kcon+0x208(SB)/8, $0x27b70a8546d22ffc DATA ·kcon+0x210(SB)/8, $0x2e1b21385c26c926 DATA ·kcon+0x218(SB)/8, $0x2e1b21385c26c926 DATA ·kcon+0x220(SB)/8, $0x4d2c6dfc5ac42aed DATA ·kcon+0x228(SB)/8, $0x4d2c6dfc5ac42aed DATA ·kcon+0x230(SB)/8, $0x53380d139d95b3df DATA ·kcon+0x238(SB)/8, $0x53380d139d95b3df DATA ·kcon+0x240(SB)/8, $0x650a73548baf63de DATA ·kcon+0x248(SB)/8, $0x650a73548baf63de DATA ·kcon+0x250(SB)/8, $0x766a0abb3c77b2a8 DATA ·kcon+0x258(SB)/8, $0x766a0abb3c77b2a8 DATA ·kcon+0x260(SB)/8, $0x81c2c92e47edaee6 DATA ·kcon+0x268(SB)/8, $0x81c2c92e47edaee6 DATA ·kcon+0x270(SB)/8, $0x92722c851482353b DATA ·kcon+0x278(SB)/8, $0x92722c851482353b DATA ·kcon+0x280(SB)/8, $0xa2bfe8a14cf10364 DATA ·kcon+0x288(SB)/8, $0xa2bfe8a14cf10364 DATA ·kcon+0x290(SB)/8, $0xa81a664bbc423001 DATA ·kcon+0x298(SB)/8, $0xa81a664bbc423001 DATA ·kcon+0x2A0(SB)/8, $0xc24b8b70d0f89791 DATA ·kcon+0x2A8(SB)/8, $0xc24b8b70d0f89791 DATA ·kcon+0x2B0(SB)/8, $0xc76c51a30654be30 DATA ·kcon+0x2B8(SB)/8, $0xc76c51a30654be30 DATA ·kcon+0x2C0(SB)/8, $0xd192e819d6ef5218 DATA ·kcon+0x2C8(SB)/8, $0xd192e819d6ef5218 DATA ·kcon+0x2D0(SB)/8, $0xd69906245565a910 DATA ·kcon+0x2D8(SB)/8, $0xd69906245565a910 DATA ·kcon+0x2E0(SB)/8, $0xf40e35855771202a DATA ·kcon+0x2E8(SB)/8, $0xf40e35855771202a DATA ·kcon+0x2F0(SB)/8, $0x106aa07032bbd1b8 DATA ·kcon+0x2F8(SB)/8, $0x106aa07032bbd1b8 DATA ·kcon+0x300(SB)/8, $0x19a4c116b8d2d0c8 DATA ·kcon+0x308(SB)/8, $0x19a4c116b8d2d0c8 DATA ·kcon+0x310(SB)/8, $0x1e376c085141ab53 DATA ·kcon+0x318(SB)/8, $0x1e376c085141ab53 DATA ·kcon+0x320(SB)/8, $0x2748774cdf8eeb99 DATA ·kcon+0x328(SB)/8, $0x2748774cdf8eeb99 DATA ·kcon+0x330(SB)/8, $0x34b0bcb5e19b48a8 DATA ·kcon+0x338(SB)/8, $0x34b0bcb5e19b48a8 DATA ·kcon+0x340(SB)/8, $0x391c0cb3c5c95a63 DATA ·kcon+0x348(SB)/8, $0x391c0cb3c5c95a63 DATA ·kcon+0x350(SB)/8, $0x4ed8aa4ae3418acb DATA ·kcon+0x358(SB)/8, $0x4ed8aa4ae3418acb DATA ·kcon+0x360(SB)/8, 
$0x5b9cca4f7763e373 DATA ·kcon+0x368(SB)/8, $0x5b9cca4f7763e373 DATA ·kcon+0x370(SB)/8, $0x682e6ff3d6b2b8a3 DATA ·kcon+0x378(SB)/8, $0x682e6ff3d6b2b8a3 DATA ·kcon+0x380(SB)/8, $0x748f82ee5defb2fc DATA ·kcon+0x388(SB)/8, $0x748f82ee5defb2fc DATA ·kcon+0x390(SB)/8, $0x78a5636f43172f60 DATA ·kcon+0x398(SB)/8, $0x78a5636f43172f60 DATA ·kcon+0x3A0(SB)/8, $0x84c87814a1f0ab72 DATA ·kcon+0x3A8(SB)/8, $0x84c87814a1f0ab72 DATA ·kcon+0x3B0(SB)/8, $0x8cc702081a6439ec DATA ·kcon+0x3B8(SB)/8, $0x8cc702081a6439ec DATA ·kcon+0x3C0(SB)/8, $0x90befffa23631e28 DATA ·kcon+0x3C8(SB)/8, $0x90befffa23631e28 DATA ·kcon+0x3D0(SB)/8, $0xa4506cebde82bde9 DATA ·kcon+0x3D8(SB)/8, $0xa4506cebde82bde9 DATA ·kcon+0x3E0(SB)/8, $0xbef9a3f7b2c67915 DATA ·kcon+0x3E8(SB)/8, $0xbef9a3f7b2c67915 DATA ·kcon+0x3F0(SB)/8, $0xc67178f2e372532b DATA ·kcon+0x3F8(SB)/8, $0xc67178f2e372532b DATA ·kcon+0x400(SB)/8, $0xca273eceea26619c DATA ·kcon+0x408(SB)/8, $0xca273eceea26619c DATA ·kcon+0x410(SB)/8, $0xd186b8c721c0c207 DATA ·kcon+0x418(SB)/8, $0xd186b8c721c0c207 DATA ·kcon+0x420(SB)/8, $0xeada7dd6cde0eb1e DATA ·kcon+0x428(SB)/8, $0xeada7dd6cde0eb1e DATA ·kcon+0x430(SB)/8, $0xf57d4f7fee6ed178 DATA ·kcon+0x438(SB)/8, $0xf57d4f7fee6ed178 DATA ·kcon+0x440(SB)/8, $0x06f067aa72176fba DATA ·kcon+0x448(SB)/8, $0x06f067aa72176fba DATA ·kcon+0x450(SB)/8, $0x0a637dc5a2c898a6 DATA ·kcon+0x458(SB)/8, $0x0a637dc5a2c898a6 DATA ·kcon+0x460(SB)/8, $0x113f9804bef90dae DATA ·kcon+0x468(SB)/8, $0x113f9804bef90dae DATA ·kcon+0x470(SB)/8, $0x1b710b35131c471b DATA ·kcon+0x478(SB)/8, $0x1b710b35131c471b DATA ·kcon+0x480(SB)/8, $0x28db77f523047d84 DATA ·kcon+0x488(SB)/8, $0x28db77f523047d84 DATA ·kcon+0x490(SB)/8, $0x32caab7b40c72493 DATA ·kcon+0x498(SB)/8, $0x32caab7b40c72493 DATA ·kcon+0x4A0(SB)/8, $0x3c9ebe0a15c9bebc DATA ·kcon+0x4A8(SB)/8, $0x3c9ebe0a15c9bebc DATA ·kcon+0x4B0(SB)/8, $0x431d67c49c100d4c DATA ·kcon+0x4B8(SB)/8, $0x431d67c49c100d4c DATA ·kcon+0x4C0(SB)/8, $0x4cc5d4becb3e42b6 DATA ·kcon+0x4C8(SB)/8, $0x4cc5d4becb3e42b6 DATA ·kcon+0x4D0(SB)/8, $0x597f299cfc657e2a DATA ·kcon+0x4D8(SB)/8, $0x597f299cfc657e2a DATA ·kcon+0x4E0(SB)/8, $0x5fcb6fab3ad6faec DATA ·kcon+0x4E8(SB)/8, $0x5fcb6fab3ad6faec DATA ·kcon+0x4F0(SB)/8, $0x6c44198c4a475817 DATA ·kcon+0x4F8(SB)/8, $0x6c44198c4a475817 DATA ·kcon+0x500(SB)/8, $0x0000000000000000 DATA ·kcon+0x508(SB)/8, $0x0000000000000000 DATA ·kcon+0x510(SB)/8, $0x1011121314151617 DATA ·kcon+0x518(SB)/8, $0x0001020304050607 GLOBL ·kcon(SB), RODATA, $1312 #define SHA512ROUND0(a, b, c, d, e, f, g, h, xi, idx) \ VSEL g, f, e, FUNC; \ VSHASIGMAD $15, e, $1, S1; \ VADDUDM xi, h, h; \ VSHASIGMAD $0, a, $1, S0; \ VADDUDM FUNC, h, h; \ VXOR b, a, FUNC; \ VADDUDM S1, h, h; \ VSEL b, c, FUNC, FUNC; \ VADDUDM KI, g, g; \ VADDUDM h, d, d; \ VADDUDM FUNC, S0, S0; \ LVX (TBL)(idx), KI; \ VADDUDM S0, h, h #define SHA512ROUND1(a, b, c, d, e, f, g, h, xi, xj, xj_1, xj_9, xj_14, idx) \ VSHASIGMAD $0, xj_1, $0, s0; \ VSEL g, f, e, FUNC; \ VSHASIGMAD $15, e, $1, S1; \ VADDUDM xi, h, h; \ VSHASIGMAD $0, a, $1, S0; \ VSHASIGMAD $15, xj_14, $0, s1; \ VADDUDM FUNC, h, h; \ VXOR b, a, FUNC; \ VADDUDM xj_9, xj, xj; \ VADDUDM S1, h, h; \ VSEL b, c, FUNC, FUNC; \ VADDUDM KI, g, g; \ VADDUDM h, d, d; \ VADDUDM FUNC, S0, S0; \ VADDUDM s0, xj, xj; \ LVX (TBL)(idx), KI; \ VADDUDM S0, h, h; \ VADDUDM s1, xj, xj // func block(dig *digest, p []byte) TEXT ·block(SB),0,$0-32 MOVD dig+0(FP), CTX MOVD p_base+8(FP), INP MOVD p_len+16(FP), LEN SRD $6, LEN SLD $6, LEN ADD INP, LEN, END CMP INP, END BEQ end MOVD $·kcon(SB), TBL_STRT MOVD R0, CNT MOVWZ $0x010, 
R_x010 MOVWZ $0x020, R_x020 MOVWZ $0x030, R_x030 MOVD $0x040, R_x040 MOVD $0x050, R_x050 MOVD $0x060, R_x060 MOVD $0x070, R_x070 MOVD $0x080, R_x080 MOVD $0x090, R_x090 MOVD $0x0a0, R_x0a0 MOVD $0x0b0, R_x0b0 MOVD $0x0c0, R_x0c0 MOVD $0x0d0, R_x0d0 MOVD $0x0e0, R_x0e0 MOVD $0x0f0, R_x0f0 MOVD $0x100, R_x100 MOVD $0x110, R_x110 #ifdef GOARCH_ppc64le // Generate the mask used with VPERM for LE MOVWZ $8, TEMP LVSL (TEMP)(R0), LEMASK VSPLTISB $0x0F, KI VXOR KI, LEMASK, LEMASK #endif LXVD2X (CTX)(R_x000), VS32 // v0 = vs32 LXVD2X (CTX)(R_x010), VS34 // v2 = vs34 LXVD2X (CTX)(R_x020), VS36 // v4 = vs36 // unpack the input values into vector registers VSLDOI $8, V0, V0, V1 LXVD2X (CTX)(R_x030), VS38 // v6 = vs38 VSLDOI $8, V2, V2, V3 VSLDOI $8, V4, V4, V5 VSLDOI $8, V6, V6, V7 loop: MOVD TBL_STRT, TBL LVX (TBL)(R_x000), KI LXVD2X (INP)(R0), VS40 // load v8 (=vs40) in advance ADD $16, INP // Copy V0-V7 to VS24-VS31 XXLOR V0, V0, VS24 XXLOR V1, V1, VS25 XXLOR V2, V2, VS26 XXLOR V3, V3, VS27 XXLOR V4, V4, VS28 XXLOR V5, V5, VS29 XXLOR V6, V6, VS30 XXLOR V7, V7, VS31 VADDUDM KI, V7, V7 // h+K[i] LVX (TBL)(R_x010), KI VPERMLE(V8,V8,LEMASK,V8) SHA512ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V8, R_x020) LXVD2X (INP)(R_x000), VS42 // load v10 (=vs42) in advance VSLDOI $8, V8, V8, V9 SHA512ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V9, R_x030) VPERMLE(V10,V10,LEMASK,V10) SHA512ROUND0(V6, V7, V0, V1, V2, V3, V4, V5, V10, R_x040) LXVD2X (INP)(R_x010), VS44 // load v12 (=vs44) in advance VSLDOI $8, V10, V10, V11 SHA512ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V11, R_x050) VPERMLE(V12,V12,LEMASK,V12) SHA512ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V12, R_x060) LXVD2X (INP)(R_x020), VS46 // load v14 (=vs46) in advance VSLDOI $8, V12, V12, V13 SHA512ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V13, R_x070) VPERMLE(V14,V14,LEMASK,V14) SHA512ROUND0(V2, V3, V4, V5, V6, V7, V0, V1, V14, R_x080) LXVD2X (INP)(R_x030), VS48 // load v16 (=vs48) in advance VSLDOI $8, V14, V14, V15 SHA512ROUND0(V1, V2, V3, V4, V5, V6, V7, V0, V15, R_x090) VPERMLE(V16,V16,LEMASK,V16) SHA512ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V16, R_x0a0) LXVD2X (INP)(R_x040), VS50 // load v18 (=vs50) in advance VSLDOI $8, V16, V16, V17 SHA512ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V17, R_x0b0) VPERMLE(V18,V18,LEMASK,V18) SHA512ROUND0(V6, V7, V0, V1, V2, V3, V4, V5, V18, R_x0c0) LXVD2X (INP)(R_x050), VS52 // load v20 (=vs52) in advance VSLDOI $8, V18, V18, V19 SHA512ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V19, R_x0d0) VPERMLE(V20,V20,LEMASK,V20) SHA512ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V20, R_x0e0) LXVD2X (INP)(R_x060), VS54 // load v22 (=vs54) in advance VSLDOI $8, V20, V20, V21 SHA512ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V21, R_x0f0) VPERMLE(V22,V22,LEMASK,V22) SHA512ROUND0(V2, V3, V4, V5, V6, V7, V0, V1, V22, R_x100) VSLDOI $8, V22, V22, V23 SHA512ROUND1(V1, V2, V3, V4, V5, V6, V7, V0, V23, V8, V9, V17, V22, R_x110) MOVWZ $4, TEMP MOVWZ TEMP, CTR ADD $0x120, TBL ADD $0x70, INP L16_xx: SHA512ROUND1(V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V18, V23, R_x000) SHA512ROUND1(V7, V0, V1, V2, V3, V4, V5, V6, V9, V10, V11, V19, V8, R_x010) SHA512ROUND1(V6, V7, V0, V1, V2, V3, V4, V5, V10, V11, V12, V20, V9, R_x020) SHA512ROUND1(V5, V6, V7, V0, V1, V2, V3, V4, V11, V12, V13, V21, V10, R_x030) SHA512ROUND1(V4, V5, V6, V7, V0, V1, V2, V3, V12, V13, V14, V22, V11, R_x040) SHA512ROUND1(V3, V4, V5, V6, V7, V0, V1, V2, V13, V14, V15, V23, V12, R_x050) SHA512ROUND1(V2, V3, V4, V5, V6, V7, V0, V1, V14, V15, V16, V8, V13, R_x060) SHA512ROUND1(V1, V2, V3, V4, V5, V6, V7, V0, V15, 
V16, V17, V9, V14, R_x070) SHA512ROUND1(V0, V1, V2, V3, V4, V5, V6, V7, V16, V17, V18, V10, V15, R_x080) SHA512ROUND1(V7, V0, V1, V2, V3, V4, V5, V6, V17, V18, V19, V11, V16, R_x090) SHA512ROUND1(V6, V7, V0, V1, V2, V3, V4, V5, V18, V19, V20, V12, V17, R_x0a0) SHA512ROUND1(V5, V6, V7, V0, V1, V2, V3, V4, V19, V20, V21, V13, V18, R_x0b0) SHA512ROUND1(V4, V5, V6, V7, V0, V1, V2, V3, V20, V21, V22, V14, V19, R_x0c0) SHA512ROUND1(V3, V4, V5, V6, V7, V0, V1, V2, V21, V22, V23, V15, V20, R_x0d0) SHA512ROUND1(V2, V3, V4, V5, V6, V7, V0, V1, V22, V23, V8, V16, V21, R_x0e0) SHA512ROUND1(V1, V2, V3, V4, V5, V6, V7, V0, V23, V8, V9, V17, V22, R_x0f0) ADD $0x100, TBL BDNZ L16_xx XXLOR VS24, VS24, V10 XXLOR VS25, VS25, V11 XXLOR VS26, VS26, V12 XXLOR VS27, VS27, V13 XXLOR VS28, VS28, V14 XXLOR VS29, VS29, V15 XXLOR VS30, VS30, V16 XXLOR VS31, VS31, V17 VADDUDM V10, V0, V0 VADDUDM V11, V1, V1 VADDUDM V12, V2, V2 VADDUDM V13, V3, V3 VADDUDM V14, V4, V4 VADDUDM V15, V5, V5 VADDUDM V16, V6, V6 VADDUDM V17, V7, V7 CMPU INP, END BLT loop #ifdef GOARCH_ppc64le VPERM V0, V1, KI, V0 VPERM V2, V3, KI, V2 VPERM V4, V5, KI, V4 VPERM V6, V7, KI, V6 #else VPERM V1, V0, KI, V0 VPERM V3, V2, KI, V2 VPERM V5, V4, KI, V4 VPERM V7, V6, KI, V6 #endif STXVD2X VS32, (CTX+R_x000) // v0 = vs32 STXVD2X VS34, (CTX+R_x010) // v2 = vs34 STXVD2X VS36, (CTX+R_x020) // v4 = vs36 STXVD2X VS38, (CTX+R_x030) // v6 = vs38 end: RET
go/src/crypto/sha512/sha512block_ppc64x.s/0
{ "file_path": "go/src/crypto/sha512/sha512block_ppc64x.s", "repo_id": "go", "token_count": 9844 }
218
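A minimal usage sketch for the sha512block_ppc64x.s assembly above: the block function it defines is the inner compression loop that crypto/sha512 dispatches to on POWER8 and newer, so callers only ever see the standard hash.Hash interface. Everything below uses only the public crypto/sha512 API and is not part of the assembly file itself.

package main

import (
	"crypto/sha512"
	"fmt"
)

func main() {
	// Streaming interface; Write feeds 128-byte blocks into the block function.
	h := sha512.New()
	h.Write([]byte("hello, world"))
	fmt.Printf("%x\n", h.Sum(nil))

	// One-shot helper backed by the same block function.
	sum := sha512.Sum512([]byte("hello, world"))
	fmt.Printf("%x\n", sum[:])
}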
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tls import ( "bytes" "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" "crypto/rsa" "errors" "fmt" "hash" "io" ) // verifyHandshakeSignature verifies a signature against pre-hashed // (if required) handshake contents. func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error { switch sigType { case signatureECDSA: pubKey, ok := pubkey.(*ecdsa.PublicKey) if !ok { return fmt.Errorf("expected an ECDSA public key, got %T", pubkey) } if !ecdsa.VerifyASN1(pubKey, signed, sig) { return errors.New("ECDSA verification failure") } case signatureEd25519: pubKey, ok := pubkey.(ed25519.PublicKey) if !ok { return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey) } if !ed25519.Verify(pubKey, signed, sig) { return errors.New("Ed25519 verification failure") } case signaturePKCS1v15: pubKey, ok := pubkey.(*rsa.PublicKey) if !ok { return fmt.Errorf("expected an RSA public key, got %T", pubkey) } if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil { return err } case signatureRSAPSS: pubKey, ok := pubkey.(*rsa.PublicKey) if !ok { return fmt.Errorf("expected an RSA public key, got %T", pubkey) } signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash} if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil { return err } default: return errors.New("internal error: unknown signature type") } return nil } const ( serverSignatureContext = "TLS 1.3, server CertificateVerify\x00" clientSignatureContext = "TLS 1.3, client CertificateVerify\x00" ) var signaturePadding = []byte{ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, } // signedMessage returns the pre-hashed (if necessary) message to be signed by // certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3. func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte { if sigHash == directSigning { b := &bytes.Buffer{} b.Write(signaturePadding) io.WriteString(b, context) b.Write(transcript.Sum(nil)) return b.Bytes() } h := sigHash.New() h.Write(signaturePadding) io.WriteString(h, context) h.Write(transcript.Sum(nil)) return h.Sum(nil) } // typeAndHashFromSignatureScheme returns the corresponding signature type and // crypto.Hash for a given TLS SignatureScheme. 
func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) { switch signatureAlgorithm { case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512: sigType = signaturePKCS1v15 case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512: sigType = signatureRSAPSS case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512: sigType = signatureECDSA case Ed25519: sigType = signatureEd25519 default: return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm) } switch signatureAlgorithm { case PKCS1WithSHA1, ECDSAWithSHA1: hash = crypto.SHA1 case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256: hash = crypto.SHA256 case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384: hash = crypto.SHA384 case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512: hash = crypto.SHA512 case Ed25519: hash = directSigning default: return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm) } return sigType, hash, nil } // legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for // a given public key used with TLS 1.0 and 1.1, before the introduction of // signature algorithm negotiation. func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) { switch pub.(type) { case *rsa.PublicKey: return signaturePKCS1v15, crypto.MD5SHA1, nil case *ecdsa.PublicKey: return signatureECDSA, crypto.SHA1, nil case ed25519.PublicKey: // RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1, // but it requires holding on to a handshake transcript to do a // full signature, and not even OpenSSL bothers with the // complexity, so we can't even test it properly. return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2") default: return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub) } } var rsaSignatureSchemes = []struct { scheme SignatureScheme minModulusBytes int maxVersion uint16 }{ // RSA-PSS is used with PSSSaltLengthEqualsHash, and requires // emLen >= hLen + sLen + 2 {PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13}, {PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13}, {PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13}, // PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires // emLen >= len(prefix) + hLen + 11 // TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS. {PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12}, {PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12}, {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12}, {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12}, } // signatureSchemesForCertificate returns the list of supported SignatureSchemes // for a given certificate, based on the public key and the protocol version, // and optionally filtered by its explicit SupportedSignatureAlgorithms. // // This function must be kept in sync with supportedSignatureAlgorithms. // FIPS filtering is applied in the caller, selectSignatureScheme. func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme { priv, ok := cert.PrivateKey.(crypto.Signer) if !ok { return nil } var sigAlgs []SignatureScheme switch pub := priv.Public().(type) { case *ecdsa.PublicKey: if version != VersionTLS13 { // In TLS 1.2 and earlier, ECDSA algorithms are not // constrained to a single curve. 
sigAlgs = []SignatureScheme{ ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512, ECDSAWithSHA1, } break } switch pub.Curve { case elliptic.P256(): sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256} case elliptic.P384(): sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384} case elliptic.P521(): sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512} default: return nil } case *rsa.PublicKey: size := pub.Size() sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes)) for _, candidate := range rsaSignatureSchemes { if size >= candidate.minModulusBytes && version <= candidate.maxVersion { sigAlgs = append(sigAlgs, candidate.scheme) } } case ed25519.PublicKey: sigAlgs = []SignatureScheme{Ed25519} default: return nil } if cert.SupportedSignatureAlgorithms != nil { var filteredSigAlgs []SignatureScheme for _, sigAlg := range sigAlgs { if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) { filteredSigAlgs = append(filteredSigAlgs, sigAlg) } } return filteredSigAlgs } return sigAlgs } // selectSignatureScheme picks a SignatureScheme from the peer's preference list // that works with the selected certificate. It's only called for protocol // versions that support signature algorithms, so TLS 1.2 and 1.3. func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) { supportedAlgs := signatureSchemesForCertificate(vers, c) if len(supportedAlgs) == 0 { return 0, unsupportedCertificateError(c) } if len(peerAlgs) == 0 && vers == VersionTLS12 { // For TLS 1.2, if the client didn't send signature_algorithms then we // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1. peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1} } // Pick signature scheme in the peer's preference order, as our // preference order is not configurable. for _, preferredAlg := range peerAlgs { if needFIPS() && !isSupportedSignatureAlgorithm(preferredAlg, defaultSupportedSignatureAlgorithmsFIPS) { continue } if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) { return preferredAlg, nil } } return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms") } // unsupportedCertificateError returns a helpful error for certificates with // an unsupported private key. func unsupportedCertificateError(cert *Certificate) error { switch cert.PrivateKey.(type) { case rsa.PrivateKey, ecdsa.PrivateKey: return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T", cert.PrivateKey, cert.PrivateKey) case *ed25519.PrivateKey: return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey") } signer, ok := cert.PrivateKey.(crypto.Signer) if !ok { return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer", cert.PrivateKey) } switch pub := signer.Public().(type) { case *ecdsa.PublicKey: switch pub.Curve { case elliptic.P256(): case elliptic.P384(): case elliptic.P521(): default: return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name) } case *rsa.PublicKey: return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms") case ed25519.PublicKey: default: return fmt.Errorf("tls: unsupported certificate key (%T)", pub) } if cert.SupportedSignatureAlgorithms != nil { return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms") } return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey) }
go/src/crypto/tls/auth.go/0
{ "file_path": "go/src/crypto/tls/auth.go", "repo_id": "go", "token_count": 3788 }
219
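A hedged sketch of what the signatureRSAPSS branch of verifyHandshakeSignature in auth.go above boils down to, using crypto/rsa directly. The key and message here are made up for illustration; only the salt-length convention (PSSSaltLengthEqualsHash) is taken from the file.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Illustrative key and pre-hashed "transcript"; not real handshake data.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signed := sha256.Sum256([]byte("handshake transcript"))

	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
	sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, signed[:], opts)
	if err != nil {
		panic(err)
	}

	// Mirrors the RSA-PSS case of verifyHandshakeSignature.
	err = rsa.VerifyPSS(&key.PublicKey, crypto.SHA256, signed[:], sig, opts)
	fmt.Println("verified:", err == nil)
}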
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tls_test import ( "crypto/tls" "crypto/x509" "log" "net/http" "net/http/httptest" "os" "time" ) // zeroSource is an io.Reader that returns an unlimited number of zero bytes. type zeroSource struct{} func (zeroSource) Read(b []byte) (n int, err error) { clear(b) return len(b), nil } func ExampleDial() { // Connecting with a custom root-certificate set. const rootPEM = ` -- GlobalSign Root R2, valid until Dec 15, 2021 -----BEGIN CERTIFICATE----- MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG 3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO 291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== -----END CERTIFICATE-----` // First, create the set of root certificates. For this example we only // have one. It's also possible to omit this in order to use the // default root set of the current operating system. roots := x509.NewCertPool() ok := roots.AppendCertsFromPEM([]byte(rootPEM)) if !ok { panic("failed to parse root certificate") } conn, err := tls.Dial("tcp", "mail.google.com:443", &tls.Config{ RootCAs: roots, }) if err != nil { panic("failed to connect: " + err.Error()) } conn.Close() } func ExampleConfig_keyLogWriter() { // Debugging TLS applications by decrypting a network traffic capture. // WARNING: Use of KeyLogWriter compromises security and should only be // used for debugging. // Dummy test HTTP server for the example with insecure random so output is // reproducible. server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) server.TLS = &tls.Config{ Rand: zeroSource{}, // for example only; don't do this. } server.StartTLS() defer server.Close() // Typically the log would go to an open file: // w, err := os.OpenFile("tls-secrets.txt", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) w := os.Stdout client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{ KeyLogWriter: w, Rand: zeroSource{}, // for reproducible output; don't do this. InsecureSkipVerify: true, // test server certificate is not trusted. 
}, }, } resp, err := client.Get(server.URL) if err != nil { log.Fatalf("Failed to get URL: %v", err) } resp.Body.Close() // The resulting file can be used with Wireshark to decrypt the TLS // connection by setting (Pre)-Master-Secret log filename in SSL Protocol // preferences. } func ExampleLoadX509KeyPair() { cert, err := tls.LoadX509KeyPair("testdata/example-cert.pem", "testdata/example-key.pem") if err != nil { log.Fatal(err) } cfg := &tls.Config{Certificates: []tls.Certificate{cert}} listener, err := tls.Listen("tcp", ":2000", cfg) if err != nil { log.Fatal(err) } _ = listener } func ExampleX509KeyPair() { certPem := []byte(`-----BEGIN CERTIFICATE----- MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d 7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B 5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc 6MF9+Yw1Yy0t -----END CERTIFICATE-----`) keyPem := []byte(`-----BEGIN EC PRIVATE KEY----- MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== -----END EC PRIVATE KEY-----`) cert, err := tls.X509KeyPair(certPem, keyPem) if err != nil { log.Fatal(err) } cfg := &tls.Config{Certificates: []tls.Certificate{cert}} listener, err := tls.Listen("tcp", ":2000", cfg) if err != nil { log.Fatal(err) } _ = listener } func ExampleX509KeyPair_httpServer() { certPem := []byte(`-----BEGIN CERTIFICATE----- MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d 7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B 5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc 6MF9+Yw1Yy0t -----END CERTIFICATE-----`) keyPem := []byte(`-----BEGIN EC PRIVATE KEY----- MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== -----END EC PRIVATE KEY-----`) cert, err := tls.X509KeyPair(certPem, keyPem) if err != nil { log.Fatal(err) } cfg := &tls.Config{Certificates: []tls.Certificate{cert}} srv := &http.Server{ TLSConfig: cfg, ReadTimeout: time.Minute, WriteTimeout: time.Minute, } log.Fatal(srv.ListenAndServeTLS("", "")) } func ExampleConfig_verifyConnection() { // VerifyConnection can be used to replace and customize connection // verification. This example shows a VerifyConnection implementation that // will be approximately equivalent to what crypto/tls does normally to // verify the peer's certificate. // Client side configuration. _ = &tls.Config{ // Set InsecureSkipVerify to skip the default validation we are // replacing. This will not disable VerifyConnection. 
InsecureSkipVerify: true, VerifyConnection: func(cs tls.ConnectionState) error { opts := x509.VerifyOptions{ DNSName: cs.ServerName, Intermediates: x509.NewCertPool(), } for _, cert := range cs.PeerCertificates[1:] { opts.Intermediates.AddCert(cert) } _, err := cs.PeerCertificates[0].Verify(opts) return err }, } // Server side configuration. _ = &tls.Config{ // Require client certificates (or VerifyConnection will run anyway and // panic accessing cs.PeerCertificates[0]) but don't verify them with the // default verifier. This will not disable VerifyConnection. ClientAuth: tls.RequireAnyClientCert, VerifyConnection: func(cs tls.ConnectionState) error { opts := x509.VerifyOptions{ DNSName: cs.ServerName, Intermediates: x509.NewCertPool(), KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, } for _, cert := range cs.PeerCertificates[1:] { opts.Intermediates.AddCert(cert) } _, err := cs.PeerCertificates[0].Verify(opts) return err }, } // Note that when certificates are not handled by the default verifier // ConnectionState.VerifiedChains will be nil. }
go/src/crypto/tls/example_test.go/0
{ "file_path": "go/src/crypto/tls/example_test.go", "repo_id": "go", "token_count": 3982 }
220
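The key-log example in example_test.go above sets InsecureSkipVerify because the httptest server's certificate is self-signed. A hedged alternative sketch: trust that certificate explicitly via httptest.Server.Certificate, which keeps verification enabled (server.Client() achieves much the same thing).

package main

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"net/http/httptest"
)

func main() {
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer server.Close()

	// Add the test server's self-signed certificate to a dedicated root pool.
	pool := x509.NewCertPool()
	pool.AddCert(server.Certificate())

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}
	resp, err := client.Get(server.URL)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}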
>>> Flow 1 (client to server) 00000000 16 03 01 00 fe 01 00 00 fa 03 03 00 00 00 00 00 |................| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 00 00 00 00 00 00 00 00 20 00 00 00 00 |........... ....| 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 32 cc a9 |.............2..| 00000050 cc a8 c0 2b c0 2f c0 2c c0 30 c0 09 c0 13 c0 0a |...+./.,.0......| 00000060 c0 14 00 9c 00 9d 00 2f 00 35 c0 12 00 0a c0 23 |......./.5.....#| 00000070 c0 27 00 3c c0 07 c0 11 00 05 13 03 13 01 13 02 |.'.<............| 00000080 01 00 00 7f 00 0b 00 02 01 00 ff 01 00 01 00 00 |................| 00000090 17 00 00 00 12 00 00 00 05 00 05 01 00 00 00 00 |................| 000000a0 00 0a 00 0a 00 08 00 1d 00 17 00 18 00 19 00 0d |................| 000000b0 00 1a 00 18 08 04 04 03 08 07 08 05 08 06 04 01 |................| 000000c0 05 01 06 01 05 03 06 03 02 01 02 03 00 2b 00 09 |.............+..| 000000d0 08 03 04 03 03 03 02 03 01 00 33 00 26 00 24 00 |..........3.&.$.| 000000e0 1d 00 20 2f e5 7d a3 47 cd 62 43 15 28 da ac 5f |.. /.}.G.bC.(.._| 000000f0 bb 29 07 30 ff f6 84 af c4 cf c2 ed 90 99 5f 58 |.).0.........._X| 00000100 cb 3b 74 |.;t| >>> Flow 2 (server to client) 00000000 16 03 03 00 5d 02 00 00 59 03 03 7b 21 bd ee 4b |....]...Y..{!..K| 00000010 af e3 88 cb 18 15 e5 f3 ef a5 a7 b3 a1 66 06 b2 |.............f..| 00000020 f1 cc d7 36 7e 18 d1 f6 2e 3e cd 20 34 09 01 9f |...6~....>. 4...| 00000030 c6 80 10 43 c1 ed e6 c1 29 1c ed ac 61 36 37 4f |...C....)...a67O| 00000040 8e 00 44 9d b9 cb 51 0b a2 9c 64 be c0 2f 00 00 |..D...Q...d../..| 00000050 11 ff 01 00 01 00 00 0b 00 04 03 00 01 02 00 17 |................| 00000060 00 00 16 03 03 02 59 0b 00 02 55 00 02 52 00 02 |......Y...U..R..| 00000070 4f 30 82 02 4b 30 82 01 b4 a0 03 02 01 02 02 09 |O0..K0..........| 00000080 00 e8 f0 9d 3f e2 5b ea a6 30 0d 06 09 2a 86 48 |....?.[..0...*.H| 00000090 86 f7 0d 01 01 0b 05 00 30 1f 31 0b 30 09 06 03 |........0.1.0...| 000000a0 55 04 0a 13 02 47 6f 31 10 30 0e 06 03 55 04 03 |U....Go1.0...U..| 000000b0 13 07 47 6f 20 52 6f 6f 74 30 1e 17 0d 31 36 30 |..Go Root0...160| 000000c0 31 30 31 30 30 30 30 30 30 5a 17 0d 32 35 30 31 |101000000Z..2501| 000000d0 30 31 30 30 30 30 30 30 5a 30 1a 31 0b 30 09 06 |01000000Z0.1.0..| 000000e0 03 55 04 0a 13 02 47 6f 31 0b 30 09 06 03 55 04 |.U....Go1.0...U.| 000000f0 03 13 02 47 6f 30 81 9f 30 0d 06 09 2a 86 48 86 |...Go0..0...*.H.| 00000100 f7 0d 01 01 01 05 00 03 81 8d 00 30 81 89 02 81 |...........0....| 00000110 81 00 db 46 7d 93 2e 12 27 06 48 bc 06 28 21 ab |...F}...'.H..(!.| 00000120 7e c4 b6 a2 5d fe 1e 52 45 88 7a 36 47 a5 08 0d |~...]..RE.z6G...| 00000130 92 42 5b c2 81 c0 be 97 79 98 40 fb 4f 6d 14 fd |.B[.....y.@.Om..| 00000140 2b 13 8b c2 a5 2e 67 d8 d4 09 9e d6 22 38 b7 4a |+.....g....."8.J| 00000150 0b 74 73 2b c2 34 f1 d1 93 e5 96 d9 74 7b f3 58 |.ts+.4......t{.X| 00000160 9f 6c 61 3c c0 b0 41 d4 d9 2b 2b 24 23 77 5b 1c |.la<..A..++$#w[.| 00000170 3b bd 75 5d ce 20 54 cf a1 63 87 1d 1e 24 c4 f3 |;.u]. 
T..c...$..| 00000180 1d 1a 50 8b aa b6 14 43 ed 97 a7 75 62 f4 14 c8 |..P....C...ub...| 00000190 52 d7 02 03 01 00 01 a3 81 93 30 81 90 30 0e 06 |R.........0..0..| 000001a0 03 55 1d 0f 01 01 ff 04 04 03 02 05 a0 30 1d 06 |.U...........0..| 000001b0 03 55 1d 25 04 16 30 14 06 08 2b 06 01 05 05 07 |.U.%..0...+.....| 000001c0 03 01 06 08 2b 06 01 05 05 07 03 02 30 0c 06 03 |....+.......0...| 000001d0 55 1d 13 01 01 ff 04 02 30 00 30 19 06 03 55 1d |U.......0.0...U.| 000001e0 0e 04 12 04 10 9f 91 16 1f 43 43 3e 49 a6 de 6d |.........CC>I..m| 000001f0 b6 80 d7 9f 60 30 1b 06 03 55 1d 23 04 14 30 12 |....`0...U.#..0.| 00000200 80 10 48 13 49 4d 13 7e 16 31 bb a3 01 d5 ac ab |..H.IM.~.1......| 00000210 6e 7b 30 19 06 03 55 1d 11 04 12 30 10 82 0e 65 |n{0...U....0...e| 00000220 78 61 6d 70 6c 65 2e 67 6f 6c 61 6e 67 30 0d 06 |xample.golang0..| 00000230 09 2a 86 48 86 f7 0d 01 01 0b 05 00 03 81 81 00 |.*.H............| 00000240 9d 30 cc 40 2b 5b 50 a0 61 cb ba e5 53 58 e1 ed |.0.@+[P.a...SX..| 00000250 83 28 a9 58 1a a9 38 a4 95 a1 ac 31 5a 1a 84 66 |.(.X..8....1Z..f| 00000260 3d 43 d3 2d d9 0b f2 97 df d3 20 64 38 92 24 3a |=C.-...... d8.$:| 00000270 00 bc cf 9c 7d b7 40 20 01 5f aa d3 16 61 09 a2 |....}.@ ._...a..| 00000280 76 fd 13 c3 cc e1 0c 5c ee b1 87 82 f1 6c 04 ed |v......\.....l..| 00000290 73 bb b3 43 77 8d 0c 1c f1 0f a1 d8 40 83 61 c9 |s..Cw.......@.a.| 000002a0 4c 72 2b 9d ae db 46 06 06 4d f4 c1 b3 3e c0 d1 |Lr+...F..M...>..| 000002b0 bd 42 d4 db fe 3d 13 60 84 5c 21 d3 3b e9 fa e7 |.B...=.`.\!.;...| 000002c0 16 03 03 00 ac 0c 00 00 a8 03 00 1d 20 d3 22 bb |............ .".| 000002d0 c4 42 90 54 0a 43 f6 26 06 c1 ad 71 c8 82 ba 03 |.B.T.C.&...q....| 000002e0 9b cd be a4 a8 04 5a 30 69 ec b5 c9 79 04 01 00 |......Z0i...y...| 000002f0 80 ab c7 ca 24 9b db 7d 8c 81 c4 c4 46 49 2a 45 |....$..}....FI*E| 00000300 69 31 1b dc ef 01 ce 9f e4 da cf 6c 04 4c e3 4e |i1.........l.L.N| 00000310 16 84 05 fe 48 f4 21 60 fc d4 e1 6c 48 8c 87 2d |....H.!`...lH..-| 00000320 1f 56 2c ad 88 2b 5c 8d 4f 36 93 d6 a3 b1 32 4a |.V,..+\.O6....2J| 00000330 ef 0f e6 db 82 1c f2 ea 38 08 2a 62 8b a3 bd 4e |........8.*b...N| 00000340 8b 2a ae eb 0e e5 f1 88 ff 3c de f8 ed d7 c3 07 |.*.......<......| 00000350 05 92 bb e5 6d 15 23 c8 54 19 a6 cf d1 4f e2 b1 |....m.#.T....O..| 00000360 a3 4d ff 6f 22 32 5b 58 f7 58 c1 9f 58 59 b5 e2 |.M.o"2[X.X..XY..| 00000370 7b 16 03 03 00 0c 0d 00 00 08 01 01 00 02 04 01 |{...............| 00000380 00 00 16 03 03 00 04 0e 00 00 00 |...........| >>> Flow 3 (client to server) 00000000 16 03 03 01 fd 0b 00 01 f9 00 01 f6 00 01 f3 30 |...............0| 00000010 82 01 ef 30 82 01 58 a0 03 02 01 02 02 10 5c 19 |...0..X.......\.| 00000020 c1 89 65 83 55 6f dc 0b c9 b9 93 9f e9 bc 30 0d |..e.Uo........0.| 00000030 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00 30 12 31 |..*.H........0.1| 00000040 10 30 0e 06 03 55 04 0a 13 07 41 63 6d 65 20 43 |.0...U....Acme C| 00000050 6f 30 1e 17 0d 31 36 30 38 31 37 32 31 35 32 33 |o0...16081721523| 00000060 31 5a 17 0d 31 37 30 38 31 37 32 31 35 32 33 31 |1Z..170817215231| 00000070 5a 30 12 31 10 30 0e 06 03 55 04 0a 13 07 41 63 |Z0.1.0...U....Ac| 00000080 6d 65 20 43 6f 30 81 9f 30 0d 06 09 2a 86 48 86 |me Co0..0...*.H.| 00000090 f7 0d 01 01 01 05 00 03 81 8d 00 30 81 89 02 81 |...........0....| 000000a0 81 00 ba 6f aa 86 bd cf bf 9f f2 ef 5c 94 60 78 |...o........\.`x| 000000b0 6f e8 13 f2 d1 96 6f cd d9 32 6e 22 37 ce 41 f9 |o.....o..2n"7.A.| 000000c0 ca 5d 29 ac e1 27 da 61 a2 ee 81 cb 10 c7 df 34 |.])..'.a.......4| 000000d0 58 95 86 e9 3d 19 e6 5c 27 
73 60 c8 8d 78 02 f4 |X...=..\'s`..x..| 000000e0 1d a4 98 09 a3 19 70 69 3c 25 62 66 2a ab 22 23 |......pi<%bf*."#| 000000f0 c5 7b 85 38 4f 2e 09 73 32 a7 bd 3e 9b ad ca 84 |.{.8O..s2..>....| 00000100 07 e6 0f 3a ff 77 c5 9d 41 85 00 8a b6 9b ee b0 |...:.w..A.......| 00000110 a4 3f 2d 4c 4c e6 42 3e bb 51 c8 dd 48 54 f4 0c |.?-LL.B>.Q..HT..| 00000120 8e 47 02 03 01 00 01 a3 46 30 44 30 0e 06 03 55 |.G......F0D0...U| 00000130 1d 0f 01 01 ff 04 04 03 02 05 a0 30 13 06 03 55 |...........0...U| 00000140 1d 25 04 0c 30 0a 06 08 2b 06 01 05 05 07 03 01 |.%..0...+.......| 00000150 30 0c 06 03 55 1d 13 01 01 ff 04 02 30 00 30 0f |0...U.......0.0.| 00000160 06 03 55 1d 11 04 08 30 06 87 04 7f 00 00 01 30 |..U....0.......0| 00000170 0d 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00 03 81 |...*.H..........| 00000180 81 00 46 ab 44 a2 fb 28 54 f8 5a 67 f8 62 94 f1 |..F.D..(T.Zg.b..| 00000190 9a b2 18 9e f2 b1 de 1d 7e 6f 76 95 a9 ba e7 5d |........~ov....]| 000001a0 a8 16 6c 9c f7 09 d3 37 e4 4b 2b 36 7c 01 ad 41 |..l....7.K+6|..A| 000001b0 d2 32 d8 c3 d2 93 f9 10 6b 8e 95 b9 2c 17 8a a3 |.2......k...,...| 000001c0 44 48 bc 59 13 83 16 04 88 a4 81 5c 25 0d 98 0c |DH.Y.......\%...| 000001d0 ac 11 b1 28 56 be 1d cd 61 62 84 09 bf d6 80 c6 |...(V...ab......| 000001e0 45 8d 82 2c b4 d8 83 9b db c9 22 b7 2a 12 11 7b |E..,......".*..{| 000001f0 fa 02 3b c1 c9 ff ea c9 9d a8 49 d3 95 d7 d5 0e |..;.......I.....| 00000200 e5 35 16 03 03 00 25 10 00 00 21 20 2f e5 7d a3 |.5....%...! /.}.| 00000210 47 cd 62 43 15 28 da ac 5f bb 29 07 30 ff f6 84 |G.bC.(.._.).0...| 00000220 af c4 cf c2 ed 90 99 5f 58 cb 3b 74 16 03 03 00 |......._X.;t....| 00000230 88 0f 00 00 84 04 01 00 80 8f 22 b5 46 ab 82 02 |..........".F...| 00000240 f8 a8 f8 63 d9 0f eb d4 2b 8d 02 de ce 79 d5 a1 |...c....+....y..| 00000250 20 bb 70 7a b2 36 6a b5 52 81 f5 ea f7 23 b5 41 | .pz.6j.R....#.A| 00000260 86 35 90 cc 64 4a 68 7e e3 ae c1 97 32 3b c2 a9 |.5..dJh~....2;..| 00000270 e0 f0 07 14 63 d5 08 15 59 46 f5 ea a6 39 5c 98 |....c...YF...9\.| 00000280 39 7e 17 d8 74 ae 05 a3 f5 3a 71 74 e8 b4 c9 a4 |9~..t....:qt....| 00000290 1c 82 04 ca fe 5f 97 23 8a c1 f9 ce d1 5d 0e 81 |....._.#.....]..| 000002a0 da 5f e1 b6 76 80 3c cf 9f 19 79 cf 33 d0 0a fe |._..v.<...y.3...| 000002b0 19 fc 2d 9a bb 24 cd d4 79 14 03 03 00 01 01 16 |..-..$..y.......| 000002c0 03 03 00 28 00 00 00 00 00 00 00 00 28 78 b8 0b |...(........(x..| 000002d0 59 d8 a8 11 21 ed 45 74 02 00 50 33 42 b9 0a d2 |Y...!.Et..P3B...| 000002e0 8c 9c a3 45 6e b5 9d 3e 5b 30 a9 2e |...En..>[0..| >>> Flow 4 (server to client) 00000000 14 03 03 00 01 01 16 03 03 00 28 f8 f5 85 19 36 |..........(....6| 00000010 be 6e 58 45 66 04 c4 f9 e7 fd 50 55 3b 07 6c 50 |.nXEf.....PU;.lP| 00000020 16 8c 2e 72 cf be 78 0a 8a 82 91 ed c1 72 10 d3 |...r..x......r..| 00000030 cf 42 b5 |.B.| >>> Flow 5 (client to server) 00000000 17 03 03 00 1e 00 00 00 00 00 00 00 01 53 4b a2 |.............SK.| 00000010 e4 5f 51 70 46 3d e3 41 28 8b 82 02 e2 27 8d eb |._QpF=.A(....'..| 00000020 42 65 7f 15 03 03 00 1a 00 00 00 00 00 00 00 02 |Be..............| 00000030 f3 31 ed 2c ca e5 13 e8 93 cd d7 51 4b c5 16 88 |.1.,.......QK...| 00000040 c0 a4 |..|
go/src/crypto/tls/testdata/Client-TLSv12-ClientCert-RSA-RSAPKCS1v15/0
{ "file_path": "go/src/crypto/tls/testdata/Client-TLSv12-ClientCert-RSA-RSAPKCS1v15", "repo_id": "go", "token_count": 4966 }
221
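The Client-TLSv12-* and Client-TLSv13-* files in this testdata directory are recorded handshake flows that the crypto/tls handshake tests replay; each "Flow" is a hex dump of the raw TLS records sent in one direction. As a small sketch only of the dump layout (offset, hex bytes, ASCII column) and not of the capture machinery, encoding/hex renders bytes in a very similar format; the sample bytes below are illustrative.

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// A TLS record header (type 0x16 = handshake, version 3,1) followed by
	// the first bytes of a ClientHello, as in Flow 1 above.
	flow := []byte{0x16, 0x03, 0x01, 0x00, 0xfe, 0x01, 0x00, 0x00, 0xfa, 0x03, 0x03}
	fmt.Print(hex.Dump(flow))
}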
>>> Flow 1 (client to server) 00000000 16 03 01 00 fe 01 00 00 fa 03 03 00 00 00 00 00 |................| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 00 00 00 00 00 00 00 00 20 00 00 00 00 |........... ....| 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 32 cc a9 |.............2..| 00000050 cc a8 c0 2b c0 2f c0 2c c0 30 c0 09 c0 13 c0 0a |...+./.,.0......| 00000060 c0 14 00 9c 00 9d 00 2f 00 35 c0 12 00 0a c0 23 |......./.5.....#| 00000070 c0 27 00 3c c0 07 c0 11 00 05 13 03 13 01 13 02 |.'.<............| 00000080 01 00 00 7f 00 0b 00 02 01 00 ff 01 00 01 00 00 |................| 00000090 17 00 00 00 12 00 00 00 05 00 05 01 00 00 00 00 |................| 000000a0 00 0a 00 0a 00 08 00 1d 00 17 00 18 00 19 00 0d |................| 000000b0 00 1a 00 18 08 04 04 03 08 07 08 05 08 06 04 01 |................| 000000c0 05 01 06 01 05 03 06 03 02 01 02 03 00 2b 00 09 |.............+..| 000000d0 08 03 04 03 03 03 02 03 01 00 33 00 26 00 24 00 |..........3.&.$.| 000000e0 1d 00 20 2f e5 7d a3 47 cd 62 43 15 28 da ac 5f |.. /.}.G.bC.(.._| 000000f0 bb 29 07 30 ff f6 84 af c4 cf c2 ed 90 99 5f 58 |.).0.........._X| 00000100 cb 3b 74 |.;t| >>> Flow 2 (server to client) 00000000 16 03 03 00 5d 02 00 00 59 03 03 e3 fe 9a 75 3b |....]...Y.....u;| 00000010 82 ac ff 66 ee c2 e9 75 d9 29 cc 89 6d e0 27 19 |...f...u.)..m.'.| 00000020 62 07 0b 0b c0 49 df 17 26 af 63 20 2a 95 69 54 |b....I..&.c *.iT| 00000030 3c 5d d9 67 05 4c 45 77 d0 d9 46 29 ed 25 fd 7d |<].g.LEw..F).%.}| 00000040 b4 ef 39 d2 ee 3b a3 88 85 c5 78 9a cc a8 00 00 |..9..;....x.....| 00000050 11 ff 01 00 01 00 00 0b 00 04 03 00 01 02 00 17 |................| 00000060 00 00 16 03 03 02 59 0b 00 02 55 00 02 52 00 02 |......Y...U..R..| 00000070 4f 30 82 02 4b 30 82 01 b4 a0 03 02 01 02 02 09 |O0..K0..........| 00000080 00 e8 f0 9d 3f e2 5b ea a6 30 0d 06 09 2a 86 48 |....?.[..0...*.H| 00000090 86 f7 0d 01 01 0b 05 00 30 1f 31 0b 30 09 06 03 |........0.1.0...| 000000a0 55 04 0a 13 02 47 6f 31 10 30 0e 06 03 55 04 03 |U....Go1.0...U..| 000000b0 13 07 47 6f 20 52 6f 6f 74 30 1e 17 0d 31 36 30 |..Go Root0...160| 000000c0 31 30 31 30 30 30 30 30 30 5a 17 0d 32 35 30 31 |101000000Z..2501| 000000d0 30 31 30 30 30 30 30 30 5a 30 1a 31 0b 30 09 06 |01000000Z0.1.0..| 000000e0 03 55 04 0a 13 02 47 6f 31 0b 30 09 06 03 55 04 |.U....Go1.0...U.| 000000f0 03 13 02 47 6f 30 81 9f 30 0d 06 09 2a 86 48 86 |...Go0..0...*.H.| 00000100 f7 0d 01 01 01 05 00 03 81 8d 00 30 81 89 02 81 |...........0....| 00000110 81 00 db 46 7d 93 2e 12 27 06 48 bc 06 28 21 ab |...F}...'.H..(!.| 00000120 7e c4 b6 a2 5d fe 1e 52 45 88 7a 36 47 a5 08 0d |~...]..RE.z6G...| 00000130 92 42 5b c2 81 c0 be 97 79 98 40 fb 4f 6d 14 fd |.B[.....y.@.Om..| 00000140 2b 13 8b c2 a5 2e 67 d8 d4 09 9e d6 22 38 b7 4a |+.....g....."8.J| 00000150 0b 74 73 2b c2 34 f1 d1 93 e5 96 d9 74 7b f3 58 |.ts+.4......t{.X| 00000160 9f 6c 61 3c c0 b0 41 d4 d9 2b 2b 24 23 77 5b 1c |.la<..A..++$#w[.| 00000170 3b bd 75 5d ce 20 54 cf a1 63 87 1d 1e 24 c4 f3 |;.u]. 
T..c...$..| 00000180 1d 1a 50 8b aa b6 14 43 ed 97 a7 75 62 f4 14 c8 |..P....C...ub...| 00000190 52 d7 02 03 01 00 01 a3 81 93 30 81 90 30 0e 06 |R.........0..0..| 000001a0 03 55 1d 0f 01 01 ff 04 04 03 02 05 a0 30 1d 06 |.U...........0..| 000001b0 03 55 1d 25 04 16 30 14 06 08 2b 06 01 05 05 07 |.U.%..0...+.....| 000001c0 03 01 06 08 2b 06 01 05 05 07 03 02 30 0c 06 03 |....+.......0...| 000001d0 55 1d 13 01 01 ff 04 02 30 00 30 19 06 03 55 1d |U.......0.0...U.| 000001e0 0e 04 12 04 10 9f 91 16 1f 43 43 3e 49 a6 de 6d |.........CC>I..m| 000001f0 b6 80 d7 9f 60 30 1b 06 03 55 1d 23 04 14 30 12 |....`0...U.#..0.| 00000200 80 10 48 13 49 4d 13 7e 16 31 bb a3 01 d5 ac ab |..H.IM.~.1......| 00000210 6e 7b 30 19 06 03 55 1d 11 04 12 30 10 82 0e 65 |n{0...U....0...e| 00000220 78 61 6d 70 6c 65 2e 67 6f 6c 61 6e 67 30 0d 06 |xample.golang0..| 00000230 09 2a 86 48 86 f7 0d 01 01 0b 05 00 03 81 81 00 |.*.H............| 00000240 9d 30 cc 40 2b 5b 50 a0 61 cb ba e5 53 58 e1 ed |.0.@+[P.a...SX..| 00000250 83 28 a9 58 1a a9 38 a4 95 a1 ac 31 5a 1a 84 66 |.(.X..8....1Z..f| 00000260 3d 43 d3 2d d9 0b f2 97 df d3 20 64 38 92 24 3a |=C.-...... d8.$:| 00000270 00 bc cf 9c 7d b7 40 20 01 5f aa d3 16 61 09 a2 |....}.@ ._...a..| 00000280 76 fd 13 c3 cc e1 0c 5c ee b1 87 82 f1 6c 04 ed |v......\.....l..| 00000290 73 bb b3 43 77 8d 0c 1c f1 0f a1 d8 40 83 61 c9 |s..Cw.......@.a.| 000002a0 4c 72 2b 9d ae db 46 06 06 4d f4 c1 b3 3e c0 d1 |Lr+...F..M...>..| 000002b0 bd 42 d4 db fe 3d 13 60 84 5c 21 d3 3b e9 fa e7 |.B...=.`.\!.;...| 000002c0 16 03 03 00 ac 0c 00 00 a8 03 00 1d 20 ca 74 42 |............ .tB| 000002d0 03 67 f2 31 ac 68 dd 2c db 23 7d 2c 84 df e9 50 |.g.1.h.,.#},...P| 000002e0 ff ef d4 a1 fa a4 5b 0b 0b 73 20 b7 0e 08 04 00 |......[..s .....| 000002f0 80 41 60 3d b1 1c 1d 14 8a 0a 3e 5c ea ca ce 07 |.A`=......>\....| 00000300 d4 45 18 6c 16 52 40 6f b0 f4 33 38 d7 cf 11 75 |.E.l.R@o..38...u| 00000310 08 4e 7d 8b cc 64 8c 79 dd 42 fa 43 2c 9e 84 91 |.N}..d.y.B.C,...| 00000320 e9 d5 bc 9f 84 2d 10 4b db 4e a6 be 37 e9 2e 34 |.....-.K.N..7..4| 00000330 d7 3e a7 17 80 b5 cd 37 04 2a 6c 2a cc 5f 0c a1 |.>.....7.*l*._..| 00000340 ff c2 31 86 17 50 3a 47 4d 99 49 94 9c 11 12 e2 |..1..P:GM.I.....| 00000350 70 fa d1 f4 7e 3a f2 3a df 16 34 45 0a b2 7b 73 |p...~:.:..4E..{s| 00000360 fa 59 23 85 88 74 2b e6 1e cb c7 00 f5 96 c8 8a |.Y#..t+.........| 00000370 02 16 03 03 00 04 0e 00 00 00 |..........| >>> Flow 3 (client to server) 00000000 16 03 03 00 25 10 00 00 21 20 2f e5 7d a3 47 cd |....%...! /.}.G.| 00000010 62 43 15 28 da ac 5f bb 29 07 30 ff f6 84 af c4 |bC.(.._.).0.....| 00000020 cf c2 ed 90 99 5f 58 cb 3b 74 14 03 03 00 01 01 |....._X.;t......| 00000030 16 03 03 00 20 0a a7 0c 5f 47 d1 7c 5e ea 63 82 |.... ..._G.|^.c.| 00000040 83 27 6c bc 29 20 25 ec 3d 15 2d 59 94 e7 9d 2c |.'l.) %.=.-Y...,| 00000050 8b 4e 95 85 fc |.N...| >>> Flow 4 (server to client) 00000000 14 03 03 00 01 01 16 03 03 00 20 03 84 99 6b f7 |.......... 
...k.| 00000010 3e 44 b1 96 6a 09 75 bf 26 4b 67 30 c4 e9 86 74 |>D..j.u.&Kg0...t| 00000020 4d e1 e3 9b fa 15 8e 1e 72 f9 5a |M.......r.Z| >>> Flow 5 (client to server) 00000000 17 03 03 00 16 68 29 2f 1b 1c 9d 28 6e b6 e5 09 |.....h)/...(n...| 00000010 4a 77 96 23 20 da 67 9e 14 ec 6b |Jw.# .g...k| >>> Flow 6 (server to client) 00000000 16 03 03 00 14 66 e8 13 d1 b5 13 cf 5c 71 7b d9 |.....f......\q{.| 00000010 63 29 3d be 68 9f f4 ad 7b |c)=.h...{| >>> Flow 7 (client to server) 00000000 16 03 03 01 1a cb 21 91 08 de 08 87 fc 63 10 93 |......!......c..| 00000010 24 a3 83 e8 99 07 d5 03 19 61 4e 8e df dd d0 f6 |$........aN.....| 00000020 50 3c fd 14 23 67 cd 74 ad 87 9b b7 8f ee a2 23 |P<..#g.t.......#| 00000030 6c 2f 90 18 f4 01 6b 31 fe ca f7 65 f9 64 5a bc |l/....k1...e.dZ.| 00000040 49 3b 25 4c 35 59 50 f2 bc 70 cb 24 2f ec 47 12 |I;%L5YP..p.$/.G.| 00000050 b6 16 22 9e ce 3b fb 17 f9 59 4e 2f 5c 03 90 47 |.."..;...YN/\..G| 00000060 d5 e8 e9 eb d9 55 bf 8f 28 e8 72 e5 15 21 42 ba |.....U..(.r..!B.| 00000070 86 65 ac b4 f5 53 7e 98 38 39 b5 7d 2a 14 d6 7b |.e...S~.89.}*..{| 00000080 3e 69 9d dc 43 33 ed 0b 8a 77 14 54 9d b0 da 88 |>i..C3...w.T....| 00000090 3b 75 50 01 07 02 99 36 39 4f 01 ee 02 cf 05 a5 |;uP....69O......| 000000a0 71 75 03 3a a5 5c c4 ce 0e 1d ba dd 9d 79 c1 84 |qu.:.\.......y..| 000000b0 81 69 7c cd 63 a4 20 a3 a2 13 8e 6e d7 01 29 8d |.i|.c. ....n..).| 000000c0 35 e2 fa 3d 74 ea 40 06 7e 0b 2d ab 3c 54 73 ef |5..=t.@.~.-.<Ts.| 000000d0 70 ad fd fa ff b3 61 da 61 c5 9f 86 8c 96 73 a6 |p.....a.a.....s.| 000000e0 f7 53 95 0e 40 33 8b 8c 25 cd 20 e1 2e e9 f4 87 |.S..@3..%. .....| 000000f0 ec 5c 1c 9c d7 98 d8 0a 96 32 12 51 dd c0 62 a4 |.\.......2.Q..b.| 00000100 3e 95 09 c8 26 a5 46 55 bd ee 13 97 7f 9c cb e9 |>...&.FU........| 00000110 43 bd e5 9f 5c 35 02 25 54 17 89 c0 be ab bc |C...\5.%T......| >>> Flow 8 (server to client) 00000000 16 03 03 00 85 10 dc 85 7d 1e 66 b2 a4 86 68 a7 |........}.f...h.| 00000010 31 f3 b1 d8 c7 c9 26 6d 74 0a c0 c0 c0 39 ad b5 |1.....&mt....9..| 00000020 1b b1 dc d3 15 9f a4 96 27 dd 65 85 ad 83 2f 97 |........'.e.../.| 00000030 42 6f 8a 9b 58 fc f8 1b 54 89 cd 9c 11 10 b9 1d |Bo..X...T.......| 00000040 c3 e1 8e 89 20 a5 2d 0b 31 b5 e0 16 54 ce 93 9b |.... 
.-.1...T...| 00000050 de cc b1 af 48 48 33 96 4d a6 00 78 7b 60 3f 7c |....HH3.M..x{`?|| 00000060 cd 86 64 5e 38 e8 fd 60 d4 b1 89 cd e4 fb 42 b5 |..d^8..`......B.| 00000070 82 8f e5 23 91 87 6d 54 9e 85 37 34 df ae 70 a5 |...#..mT..74..p.| 00000080 cd 4c de 95 f7 9f a9 b4 87 0b 16 03 03 02 69 d1 |.L............i.| 00000090 c2 87 4e c8 26 b3 89 88 da c0 b9 24 84 88 c0 76 |..N.&......$...v| 000000a0 94 08 eb 56 5d fc 96 81 18 f3 5b 90 f1 38 e5 ae |...V].....[..8..| 000000b0 b5 4e 9a c1 19 19 1a b3 40 55 08 cb 0e 3b 93 b3 |.N......@U...;..| 000000c0 d7 74 80 3a 7c 0d eb be 70 0e a9 50 88 d2 52 11 |.t.:|...p..P..R.| 000000d0 2d 55 cf d9 9d aa 92 bd 15 ab 1a 62 b3 26 09 3f |-U.........b.&.?| 000000e0 50 2b b8 f2 00 a2 5a 2b 98 e3 bb e2 c7 0b 07 0e |P+....Z+........| 000000f0 5a e5 2e 08 27 87 f3 0b 1b f7 f7 82 49 cf 08 d6 |Z...'.......I...| 00000100 7c 78 39 0e 92 e7 28 f4 8f 12 ac 6a c2 72 46 e8 ||x9...(....j.rF.| 00000110 92 bf 36 c7 52 46 7a 01 92 6d a9 08 19 2a 07 70 |..6.RFz..m...*.p| 00000120 8c b2 40 7d 7f 00 27 ea fb e6 24 5c d2 d8 ae 99 |..@}..'...$\....| 00000130 0b cc 2a 4b 35 12 de 85 e9 b5 e2 2d 80 39 6d 17 |..*K5......-.9m.| 00000140 7a ca ef ca bb 58 1e 13 86 10 bc 4d 8a df 32 ba |z....X.....M..2.| 00000150 3b 7e f5 40 ec af 2e 63 be f3 38 02 a9 9d 30 1f |;~.@...c..8...0.| 00000160 c4 d2 04 93 df e1 8a bb 5d f0 6a a8 d9 b9 ad bf |........].j.....| 00000170 da d6 5e bd a1 a8 b8 20 91 26 ab 50 fd 2b bf e1 |..^.... .&.P.+..| 00000180 56 33 e3 e4 4f 3f 21 ea 86 10 e9 35 84 03 70 0b |V3..O?!....5..p.| 00000190 bd dc 44 fc 58 1d bc 9e 3a 0d 3b 61 13 d6 33 1b |..D.X...:.;a..3.| 000001a0 97 a8 67 bf e6 d9 1d 34 69 70 93 4a 6c 28 88 01 |..g....4ip.Jl(..| 000001b0 01 9d 78 59 c0 58 66 c3 70 e4 ba 74 e6 0f f7 27 |..xY.Xf.p..t...'| 000001c0 8b dd 05 ac 0d 99 dc e9 c8 2f 16 ee 59 53 91 d7 |........./..YS..| 000001d0 28 27 db ad 85 e6 d0 72 28 81 31 e1 e7 bf dd 95 |('.....r(.1.....| 000001e0 75 ff d6 a9 dc 4f 30 37 52 07 87 cb 8d a7 4a 00 |u....O07R.....J.| 000001f0 22 53 3c 6d 91 94 11 5d 0a a8 21 ea 1f 18 4e 42 |"S<m...]..!...NB| 00000200 1f 11 ea 17 55 c5 0c e2 41 e2 1f 2b 7b 4c d7 72 |....U...A..+{L.r| 00000210 04 d6 64 45 e4 48 03 d7 ca ea 2b 78 0c 9b 03 b0 |..dE.H....+x....| 00000220 c6 14 cc d8 89 d4 4d 4a 82 b2 e4 69 e9 2e 7f cf |......MJ...i....| 00000230 fa 4c b8 22 85 34 ed 97 a3 71 4d ba d7 25 b1 f6 |.L.".4...qM..%..| 00000240 1c 47 99 75 7c 34 63 10 11 be 93 bb 0b 67 be 6f |.G.u|4c......g.o| 00000250 7b 41 c8 41 95 ca 4f ea 02 3d 37 5e fc 0e 12 67 |{A.A..O..=7^...g| 00000260 84 4d be 41 6f 72 b3 e8 8b 3d 8f 71 a1 f4 f7 02 |.M.Aor...=.q....| 00000270 23 66 88 29 5f d0 51 a2 c2 50 d7 cd bd bf f6 54 |#f.)_.Q..P.....T| 00000280 96 7f 26 44 16 ec f5 b7 52 11 ef a1 7e 8c 4e 22 |..&D....R...~.N"| 00000290 ed ac 1b 46 58 1a 52 ed 2c 79 64 a9 ae 8b 6c e0 |...FX.R.,yd...l.| 000002a0 2c 51 c3 cc 49 7a 90 cf 68 74 ea 59 29 f6 bb 29 |,Q..Iz..ht.Y)..)| 000002b0 b7 a9 bf 02 a3 94 d4 50 00 c0 98 30 1f 4a 72 46 |.......P...0.JrF| 000002c0 de c6 aa c9 f8 b4 2b 16 38 cd 9f 39 af e4 22 37 |......+.8..9.."7| 000002d0 4c 5a ec e0 14 bc 11 ef db db 71 6f 8e a9 6d 17 |LZ........qo..m.| 000002e0 cf 9b ae c3 e0 2f d4 80 f8 be 4f 9d 87 53 fd 53 |...../....O..S.S| 000002f0 b6 a2 cf b9 8e 56 85 39 16 03 03 00 bc 63 23 39 |.....V.9.....c#9| 00000300 de e9 93 fb f0 9b 64 b6 5e 7c a0 47 f6 b3 99 16 |......d.^|.G....| 00000310 1c 68 f3 f6 3f bd a0 fc 80 09 6d 95 9a 17 65 e0 |.h..?.....m...e.| 00000320 c2 ee fa cf fd af d8 05 5c 17 f1 1f a3 1c f2 47 |........\......G| 00000330 87 a4 07 0a aa b6 6a 21 41 96 14 85 
ea d0 d5 67 |......j!A......g| 00000340 4e c2 53 fc 2a b5 5d 48 f8 f7 02 66 89 cc 7e df |N.S.*.]H...f..~.| 00000350 6f 8c d8 2d ac 64 3f 48 5b dc 25 38 8b 95 37 58 |o..-.d?H[.%8..7X| 00000360 ae a9 c8 ac 91 a1 1f fb d1 24 6b c2 13 c8 00 fd |.........$k.....| 00000370 14 73 d1 13 a6 17 28 93 f0 67 05 9c 90 5d 13 e0 |.s....(..g...]..| 00000380 b4 26 16 80 23 5c a8 b6 3e f0 80 b5 41 db f0 ef |.&..#\..>...A...| 00000390 37 c3 62 b5 30 89 4d b6 6c 5c f2 8c ff 3c 2e 48 |7.b.0.M.l\...<.H| 000003a0 bf 0f 5f 3e e0 b2 d7 64 2b dc 42 ad 2f 83 b5 6b |.._>...d+.B./..k| 000003b0 5a 04 51 f8 c4 07 c2 61 e4 16 03 03 00 4a a9 18 |Z.Q....a.....J..| 000003c0 58 79 d8 ad 86 d7 b6 77 39 76 99 b4 0a 29 72 b1 |Xy.....w9v...)r.| 000003d0 eb c3 ec 95 4d 30 4c 21 1a 7f 52 da 56 21 78 b7 |....M0L!..R.V!x.| 000003e0 fe dd d3 0e fc a9 89 40 99 97 12 68 7e 28 6e 32 |.......@...h~(n2| 000003f0 5b c0 e8 b4 42 eb ee 83 c2 ae 0d 28 99 48 46 a9 |[...B......(.HF.| 00000400 64 38 60 59 c2 dc 5c 7b 16 03 03 00 14 78 72 cf |d8`Y..\{.....xr.| 00000410 ac 5d 53 7e 73 b1 6e e2 0a 8e 12 33 be 03 86 6a |.]S~s.n....3...j| 00000420 ce |.| >>> Flow 9 (client to server) 00000000 16 03 03 02 69 33 a1 bb 7d ff 23 ba ad dc a0 1e |....i3..}.#.....| 00000010 3c 7c 1f 53 59 81 13 04 49 f7 6e bf 8f ef 11 92 |<|.SY...I.n.....| 00000020 51 bf 8a e7 fe ef 76 e4 f8 88 27 95 f4 12 f2 ed |Q.....v...'.....| 00000030 cc ec a6 2d 10 87 19 5e c9 fe cd 12 ae 1a 38 f9 |...-...^......8.| 00000040 78 f1 42 f8 47 26 1b c2 5f 82 a5 aa 65 17 eb 06 |x.B.G&.._...e...| 00000050 03 c8 22 9c 9f 8b 87 26 4a 64 18 93 9c f4 16 82 |.."....&Jd......| 00000060 28 eb 8d 2d f8 51 60 3a f4 bb a3 77 f5 97 2b 6a |(..-.Q`:...w..+j| 00000070 19 db 2e 46 8b d8 08 d8 28 88 bb bd d8 3b c2 bb |...F....(....;..| 00000080 8c 87 1b 17 a0 34 a8 c6 e8 a5 a0 e5 cb d6 9c 9d |.....4..........| 00000090 a6 a4 6c 06 6a 85 0c 5e ef 0c 97 6f b6 11 e7 4d |..l.j..^...o...M| 000000a0 60 91 95 74 48 af 1b 0b 47 03 ed 47 e9 b9 28 36 |`..tH...G..G..(6| 000000b0 74 0d 8a b8 1e 5d 1c cb ea 7b b7 8f 18 e3 b9 b9 |t....]...{......| 000000c0 43 ed 53 f2 bf 7e 8c 8f e6 c0 2a f4 31 25 f5 e8 |C.S..~....*.1%..| 000000d0 44 48 2d 36 4f a7 11 f6 ea 7c 3f d6 2d 5d 3e 39 |DH-6O....|?.-]>9| 000000e0 fb 50 f1 c9 eb 25 be 7a c5 af 54 f5 12 07 04 22 |.P...%.z..T...."| 000000f0 ec 68 45 f1 fd d1 91 0a cf 64 0f f3 58 4e e8 ce |.hE......d..XN..| 00000100 f6 9e c0 57 ea 3a bd a0 5d 26 d4 ab ed 1a 34 12 |...W.:..]&....4.| 00000110 9f fa ec 73 d6 38 06 78 85 3c f9 cd aa fb fd 32 |...s.8.x.<.....2| 00000120 cb e0 a7 d0 bb 71 b0 a3 d2 b9 89 5e a9 57 18 7e |.....q.....^.W.~| 00000130 c0 c9 2e e3 e5 07 65 6f 4c ad 22 78 50 f2 87 0f |......eoL."xP...| 00000140 cd 93 b3 19 a9 73 e2 1d bf 38 a0 20 8d 3a 3a 4e |.....s...8. 
.::N| 00000150 b3 02 b7 11 1f df 12 60 dd 10 1c f8 75 4a 52 86 |.......`....uJR.| 00000160 71 61 88 37 b3 bf f7 2d 45 6d 10 4a 9f 80 b3 fc |qa.7...-Em.J....| 00000170 36 d4 d3 9f 08 f2 bb ce 9c 3a 9a 59 f6 0a 0a aa |6........:.Y....| 00000180 7f 61 29 0b df 29 92 09 55 ec 1c 84 d6 1f 7c 68 |.a)..)..U.....|h| 00000190 72 00 88 5e e3 6a ee 35 45 7a 0b 49 35 0e 2d 7c |r..^.j.5Ez.I5.-|| 000001a0 ac 8b 9f 9c 94 48 2d 1e 74 24 68 14 8e 16 b4 06 |.....H-.t$h.....| 000001b0 24 66 a3 18 f4 ff 21 31 4b 9b bb 09 8a 9d 57 e4 |$f....!1K.....W.| 000001c0 a1 5c ae f1 e1 4b 5c 2a be 64 3c 7a 61 10 3d 73 |.\...K\*.d<za.=s| 000001d0 76 94 27 e9 8c 9a 8f 14 9e 9f cb 75 7e 73 14 e0 |v.'........u~s..| 000001e0 7d 78 90 3d a7 2d cb df db 2b eb b6 b1 01 aa e4 |}x.=.-...+......| 000001f0 5f 7f 39 7e eb e9 79 14 07 fa 9d d9 b6 4b 0d 7a |_.9~..y......K.z| 00000200 e7 ea 17 d5 c8 0b bd 12 c3 2d fa b1 cc 36 97 a3 |.........-...6..| 00000210 c3 ac 03 70 f7 47 6d 79 af 96 8f c0 8b b1 84 b7 |...p.Gmy........| 00000220 51 e5 f8 80 e5 de 8a 78 5c 9e 0a cd 0c c9 e0 27 |Q......x\......'| 00000230 bc 9a 9b 88 f2 94 9d 3f a2 a7 72 81 43 3f ca fd |.......?..r.C?..| 00000240 19 ba d1 aa 73 0f 5c 70 67 b7 38 68 18 99 e8 e8 |....s.\pg.8h....| 00000250 51 8a 30 68 81 cc 1b ed 41 46 5d d4 cc 37 9f 8a |Q.0h....AF]..7..| 00000260 af 70 96 15 22 2e 5c 0d d0 9b e9 c3 f6 76 16 03 |.p..".\......v..| 00000270 03 00 35 e7 e7 b7 b8 af 17 8f 2f 52 a9 1c 9a 33 |..5......./R...3| 00000280 c5 e9 3d b3 b7 25 5b 80 c1 6e 71 c8 44 bd 27 32 |..=..%[..nq.D.'2| 00000290 16 44 ee 64 a9 1c 6e 4c 84 55 33 a0 d4 5b fc 94 |.D.d..nL.U3..[..| 000002a0 77 15 ff 82 11 af a4 f7 16 03 03 00 98 c0 03 0e |w...............| 000002b0 00 a5 45 5a 08 4b fd a8 d0 1b d1 f0 12 e3 62 dd |..EZ.K........b.| 000002c0 91 dc 81 b2 6a 51 e3 d9 16 4c 48 c4 9a 66 bb 46 |....jQ...LH..f.F| 000002d0 f9 e1 58 04 f7 47 18 ed 6d 4a 78 78 6e 84 b2 38 |..X..G..mJxxn..8| 000002e0 e2 51 a6 3a 4c fe 76 d9 ef 3e b2 d9 b2 c0 5e 4f |.Q.:L.v..>....^O| 000002f0 f7 4b 38 70 d0 82 42 f2 5a c9 34 d2 76 7b 8d e4 |.K8p..B.Z.4.v{..| 00000300 0b c7 3d be 86 7d 21 8c f8 9f 1a 4a dc 48 07 12 |..=..}!....J.H..| 00000310 ab 4c 98 f7 8b 3e 02 49 c2 d1 b5 e9 9a e0 5a 74 |.L...>.I......Zt| 00000320 58 ce a3 33 be 0f ca 12 54 a2 0e a9 e5 22 8f 82 |X..3....T...."..| 00000330 df 1a 3e 21 bf 66 c6 13 39 77 83 95 b1 4b 1d d3 |..>!.f..9w...K..| 00000340 32 69 24 c2 89 14 03 03 00 11 1c eb 63 31 c9 c9 |2i$.........c1..| 00000350 8a e3 8b 25 7e e9 b7 7d 19 03 3c 16 03 03 00 20 |...%~..}..<.... | 00000360 c6 41 57 04 ab e8 6c 55 0b dc cd 06 f5 b3 4b 95 |.AW...lU......K.| 00000370 40 b5 04 47 da 71 d7 70 ce d6 2e 0b c7 66 16 3c |@..G.q.p.....f.<| >>> Flow 10 (server to client) 00000000 14 03 03 00 11 c0 4e fd fa 3a 1c 4a 19 ce 08 f6 |......N..:.J....| 00000010 da 70 56 fa 39 42 16 03 03 00 20 0c bc c4 5e 24 |.pV.9B.... ...^$| 00000020 e3 0e c9 23 8c cd f6 a2 45 07 e6 93 40 7c c8 02 |...#....E...@|..| 00000030 6e 24 f7 4e 06 9a ad ba de cd 10 17 03 03 00 19 |n$.N............| 00000040 52 e0 85 b3 46 25 7d fd 8f d0 4c ca df fe 2f a6 |R...F%}...L.../.| 00000050 14 d6 08 82 70 0f 93 78 bf 16 03 03 00 14 ff 68 |....p..x.......h| 00000060 f1 6d 8d b4 5f 74 19 6b e3 1e 6e ee 9d ee c2 34 |.m.._t.k..n....4| 00000070 0f 44 |.D| >>> Flow 11 (client to server) 00000000 15 03 03 00 12 81 0d 63 a5 11 7a 03 ab 66 f6 c8 |.......c..z..f..| 00000010 15 f3 d9 23 fa 67 6b 15 03 03 00 12 71 0e 3b 52 |...#.gk.....q.;R| 00000020 1a 05 39 4c 8c 76 c3 a9 00 35 bd 66 80 a1 |..9L.v...5.f..|
go/src/crypto/tls/testdata/Client-TLSv12-RenegotiateTwiceRejected/0
{ "file_path": "go/src/crypto/tls/testdata/Client-TLSv12-RenegotiateTwiceRejected", "repo_id": "go", "token_count": 9650 }
222
>>> Flow 1 (client to server) 00000000 16 03 01 00 fe 01 00 00 fa 03 03 00 00 00 00 00 |................| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 00 00 00 00 00 00 00 00 20 00 00 00 00 |........... ....| 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 32 cc a9 |.............2..| 00000050 cc a8 c0 2b c0 2f c0 2c c0 30 c0 09 c0 13 c0 0a |...+./.,.0......| 00000060 c0 14 00 9c 00 9d 00 2f 00 35 c0 12 00 0a c0 23 |......./.5.....#| 00000070 c0 27 00 3c c0 07 c0 11 00 05 13 03 13 01 13 02 |.'.<............| 00000080 01 00 00 7f 00 0b 00 02 01 00 ff 01 00 01 00 00 |................| 00000090 17 00 00 00 12 00 00 00 05 00 05 01 00 00 00 00 |................| 000000a0 00 0a 00 0a 00 08 00 1d 00 17 00 18 00 19 00 0d |................| 000000b0 00 1a 00 18 08 04 04 03 08 07 08 05 08 06 04 01 |................| 000000c0 05 01 06 01 05 03 06 03 02 01 02 03 00 2b 00 09 |.............+..| 000000d0 08 03 04 03 03 03 02 03 01 00 33 00 26 00 24 00 |..........3.&.$.| 000000e0 1d 00 20 2f e5 7d a3 47 cd 62 43 15 28 da ac 5f |.. /.}.G.bC.(.._| 000000f0 bb 29 07 30 ff f6 84 af c4 cf c2 ed 90 99 5f 58 |.).0.........._X| 00000100 cb 3b 74 |.;t| >>> Flow 2 (server to client) 00000000 16 03 03 00 7a 02 00 00 76 03 03 dc 81 c4 82 2e |....z...v.......| 00000010 a2 4f c4 c2 53 c2 bc 6a bd f3 46 84 b5 ba 66 b5 |.O..S..j..F...f.| 00000020 8b 67 7c 90 51 40 12 39 18 e1 bf 20 00 00 00 00 |.g|.Q@.9... ....| 00000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000040 00 00 00 00 00 00 00 00 00 00 00 00 13 03 00 00 |................| 00000050 2e 00 2b 00 02 03 04 00 33 00 24 00 1d 00 20 3b |..+.....3.$... ;| 00000060 6b 2a e6 c2 7e b6 59 68 e4 e3 f2 f3 14 e5 72 bc |k*..~.Yh......r.| 00000070 c9 61 b4 b0 0a c6 41 0d a9 8e d9 9b 7d 2a 11 14 |.a....A.....}*..| 00000080 03 03 00 01 01 17 03 03 00 17 43 af 38 b9 56 06 |..........C.8.V.| 00000090 2d 10 e3 e5 1d 1b 1e a9 5f 90 ca 0d a9 52 33 86 |-......._....R3.| 000000a0 85 17 03 03 02 6d ac 6a a1 8f 42 27 74 80 25 f9 |.....m.j..B't.%.| 000000b0 1f 48 49 2d c2 33 38 e7 93 7e b0 b2 50 b8 6a ea |.HI-.38..~..P.j.| 000000c0 a6 81 ef 9b 55 83 4e 93 df 92 97 6f 00 f5 c4 fc |....U.N....o....| 000000d0 ec b1 19 dd 68 b5 bd c4 bb ba 63 9a e4 c9 24 af |....h.....c...$.| 000000e0 88 13 65 11 bf ea d9 07 e9 46 fd 5b 60 ce 57 46 |..e......F.[`.WF| 000000f0 8b a9 bd c6 58 1a 3b bd 5e fb 0f 46 ec fc 8b 2c |....X.;.^..F...,| 00000100 ea a7 19 06 6a e5 6f 10 7a 27 04 6b aa a4 2c f4 |....j.o.z'.k..,.| 00000110 ef 3b e8 8a 51 88 fd e0 ae 33 b1 4c b3 04 5e 91 |.;..Q....3.L..^.| 00000120 b0 98 0b 9f 38 a3 3c fb 9f d5 d2 36 e4 09 19 18 |....8.<....6....| 00000130 a5 b3 12 aa c9 03 ac b5 ab bb f1 7a 02 d2 dd 75 |...........z...u| 00000140 0e cb 60 09 39 23 c3 b2 c1 8e e0 18 57 72 54 61 |..`.9#......WrTa| 00000150 4c 99 35 1d ba 31 01 0c 48 d2 f2 88 22 9c 91 7d |L.5..1..H..."..}| 00000160 e3 74 f9 b3 52 bf 0e 0b e1 31 7c 2c cb fd f2 8c |.t..R....1|,....| 00000170 bf 27 40 6d 26 b7 62 47 56 91 22 00 67 9a df 4f |.'@m&.bGV.".g..O| 00000180 f0 47 57 3c a6 46 4d 16 f6 8d fc 2d 91 c4 1a bf |.GW<.FM....-....| 00000190 38 63 ec 63 fe 97 14 80 aa 5b 60 ff c8 77 57 9c |8c.c.....[`..wW.| 000001a0 d5 86 a6 76 96 b3 e9 db c9 eb dd 94 84 2a 46 f5 |...v.........*F.| 000001b0 6a c1 10 66 59 f1 13 b9 41 f3 89 26 ba 52 69 95 |j..fY...A..&.Ri.| 000001c0 b1 f3 66 30 f8 aa 90 f7 90 49 19 48 4c 25 4a 1f |..f0.....I.HL%J.| 000001d0 12 9d 67 32 79 bb 53 d8 c5 d1 b4 6e 89 75 
49 c0 |..g2y.S....n.uI.| 000001e0 65 86 ac 72 23 2f 97 d3 ae e2 64 79 5e e2 10 4e |e..r#/....dy^..N| 000001f0 55 0c c6 70 d3 2e 4a 6c b0 73 0a 11 eb ae f7 a1 |U..p..Jl.s......| 00000200 a1 f0 5f 67 45 46 d3 8c 11 ff 21 62 7d ed f9 0e |.._gEF....!b}...| 00000210 2a ba b3 82 f5 6b c1 4a 4e cc 11 90 48 81 96 7a |*....k.JN...H..z| 00000220 df f9 22 ae 53 31 14 9d c9 5c 85 e7 db a2 dd 02 |..".S1...\......| 00000230 56 eb d0 fe 20 35 21 c9 33 63 b5 b7 a8 93 30 7f |V... 5!.3c....0.| 00000240 86 1f d1 af b1 ff 3e 9a d1 a8 90 d9 9c 86 55 e8 |......>.......U.| 00000250 d0 4c c4 6d a3 ce c6 c0 df f8 a8 b6 43 03 ae fc |.L.m........C...| 00000260 7d 94 7b fe be 85 46 d8 42 9d b6 15 b9 a3 27 3d |}.{...F.B.....'=| 00000270 80 64 54 c8 53 c4 a7 94 52 8f 9d 4c 58 54 a5 c4 |.dT.S...R..LXT..| 00000280 e0 e7 2e cb f3 8c d0 82 3f 95 76 c9 ea ea 80 41 |........?.v....A| 00000290 21 5d 3e a3 1e be 4e 0b ce 10 ab 61 a5 76 ef 62 |!]>...N....a.v.b| 000002a0 50 1b 52 a8 75 23 fd eb ea 76 f9 d8 41 3c a2 e4 |P.R.u#...v..A<..| 000002b0 21 cb 56 f7 40 81 78 56 22 06 2a 38 fc 1f d8 9f |!.V.@.xV".*8....| 000002c0 38 b0 7a 93 f0 8c ad 3e 54 27 a2 d7 8b 2c 79 46 |8.z....>T'...,yF| 000002d0 15 65 f1 55 b2 2a 06 a1 97 9b 47 23 f8 9a 3f 88 |.e.U.*....G#..?.| 000002e0 8e 26 7e 13 cd 6e 8b cb d5 a5 78 48 f7 ba ad d8 |.&~..n....xH....| 000002f0 08 3b 34 5b 52 cd e3 2d 12 ac 81 00 c0 d0 4d df |.;4[R..-......M.| 00000300 56 d6 40 86 91 31 3d ba 6b 41 bc 51 6f ac b2 df |V.@..1=.kA.Qo...| 00000310 90 4b 78 17 03 03 00 99 21 0f 5b 18 54 84 98 0c |.Kx.....!.[.T...| 00000320 3a 7b 0b db 99 0e 09 f6 b7 4f a9 cc da bf 4c ac |:{.......O....L.| 00000330 5f 44 fc ba 9d 5d 52 d5 ec 2e 08 0c cc 3c e1 72 |_D...]R......<.r| 00000340 10 77 5b 7b 55 f7 c1 44 a3 25 e0 48 20 9a 3a de |.w[{U..D.%.H .:.| 00000350 2f ae 30 a9 e9 5b 75 84 e3 59 f0 6b 23 a6 d8 20 |/.0..[u..Y.k#.. 
| 00000360 16 51 2a 19 61 60 35 28 74 41 32 fa 97 8d 3b ed |.Q*.a`5(tA2...;.| 00000370 ff 64 94 d7 27 4c 9b 1e 5e b8 89 43 e3 ae 2a b6 |.d..'L..^..C..*.| 00000380 60 a8 bb f0 d1 75 85 15 bb 95 3c 82 f1 62 da b1 |`....u....<..b..| 00000390 25 d2 58 7d 67 7f 02 5b b4 91 68 3c 70 10 09 94 |%.X}g..[..h<p...| 000003a0 b0 6d ca d9 39 bf 4a 16 5e 44 e8 92 a9 f5 af 81 |.m..9.J.^D......| 000003b0 08 17 03 03 00 35 46 1a 28 22 ca 25 09 12 b5 e6 |.....5F.(".%....| 000003c0 7e 70 f1 0a 0d 9b 7d f2 59 98 41 d5 9d fd be 8e |~p....}.Y.A.....| 000003d0 ff b2 89 e7 1a 3c 9a b7 50 01 ce af 22 4a 4b ce |.....<..P..."JK.| 000003e0 51 63 56 c3 93 28 29 fb b2 67 1d |QcV..()..g.| >>> Flow 3 (client to server) 00000000 14 03 03 00 01 01 17 03 03 00 35 c9 09 ac f5 44 |..........5....D| 00000010 40 83 88 5a b9 46 70 b9 ff 9b 2e bb b4 7e 72 b0 |@..Z.Fp......~r.| 00000020 85 26 d6 37 33 ec d1 ac ce f4 db 72 8c e1 07 b5 |.&.73......r....| 00000030 d0 ce ee 2f 19 77 62 ec 97 ae 1b e1 5f 85 bf c4 |.../.wb....._...| 00000040 17 03 03 00 17 1c ae 0f 1d 50 be 4b d0 64 4f 23 |.........P.K.dO#| 00000050 41 60 d9 c7 f5 60 a6 5c 38 14 fd d5 |A`...`.\8...| >>> Flow 4 (server to client) 00000000 17 03 03 00 16 ad 61 13 66 9c 3c dd 88 42 2c 6c |......a.f.<..B,l| 00000010 d1 22 5d b4 b2 6f f0 68 4f 37 4d |."]..o.hO7M| >>> Flow 5 (client to server) 00000000 17 03 03 00 16 e5 b5 ff ad d4 f5 e5 55 04 83 a7 |............U...| 00000010 59 43 9a 3e 68 4d 38 6f b1 1c 30 |YC.>hM8o..0| >>> Flow 6 (server to client) 00000000 17 03 03 00 1a 64 0c f4 8e 8f 2b 04 e2 36 28 77 |.....d....+..6(w| 00000010 a2 28 97 4b 15 ba 1b b2 10 31 b3 4d 87 09 af |.(.K.....1.M...| >>> Flow 7 (client to server) 00000000 17 03 03 00 1d 90 c9 33 bf fc 76 12 3d af 9e c8 |.......3..v.=...| 00000010 8c ca e7 a1 63 6e 80 91 b1 7f 5e e4 dc c2 e6 1c |....cn....^.....| 00000020 b6 2b 17 03 03 00 13 ef a8 30 91 b1 20 fe 82 79 |.+.......0.. ..y| 00000030 44 31 a8 af 99 bb 5e 97 75 a3 |D1....^.u.|
go/src/crypto/tls/testdata/Client-TLSv13-KeyUpdate/0
{ "file_path": "go/src/crypto/tls/testdata/Client-TLSv13-KeyUpdate", "repo_id": "go", "token_count": 3852 }
223
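A note on reading the testdata records above and below: each ">>> Flow N" section is a byte-for-byte dump of one direction of a recorded handshake, shown as offset, hex bytes, and an ASCII column. That layout matches what encoding/hex's Dumper writes, which is presumably how these transcripts were generated; the sketch below only illustrates the format, and the flow bytes are a short placeholder rather than data taken from any transcript.

package main

import (
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	// Placeholder bytes standing in for one recorded flow; the real
	// testdata files contain complete TLS records.
	flow := []byte{0x16, 0x03, 0x01, 0x00, 0x6d, 0x01, 0x00, 0x00, 0x69}

	fmt.Println(">>> Flow 1 (client to server)")
	dumper := hex.Dumper(os.Stdout)
	dumper.Write(flow)
	dumper.Close()
}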
>>> Flow 1 (client to server) 00000000 16 03 01 00 6d 01 00 00 69 03 03 e9 31 0f d0 25 |....m...i...1..%| 00000010 ef 25 a7 1a 9b 8c 4b a3 ca 2b a6 54 89 1c e1 68 |.%....K..+.T...h| 00000020 6f b2 b2 60 6f 8a dc 87 24 8c 7b 00 00 04 00 2f |o..`o...$.{..../| 00000030 00 ff 01 00 00 3c 00 16 00 00 00 17 00 00 00 0d |.....<..........| 00000040 00 30 00 2e 04 03 05 03 06 03 08 07 08 08 08 09 |.0..............| 00000050 08 0a 08 0b 08 04 08 05 08 06 04 01 05 01 06 01 |................| 00000060 03 03 02 03 03 01 02 01 03 02 02 02 04 02 05 02 |................| 00000070 06 02 |..| >>> Flow 2 (server to client) 00000000 16 03 03 00 35 02 00 00 31 03 03 00 00 00 00 00 |....5...1.......| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 44 4f 57 4e 47 52 44 01 00 00 2f 00 00 |...DOWNGRD.../..| 00000030 09 ff 01 00 01 00 00 17 00 00 16 03 03 02 59 0b |..............Y.| 00000040 00 02 55 00 02 52 00 02 4f 30 82 02 4b 30 82 01 |..U..R..O0..K0..| 00000050 b4 a0 03 02 01 02 02 09 00 e8 f0 9d 3f e2 5b ea |............?.[.| 00000060 a6 30 0d 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00 |.0...*.H........| 00000070 30 1f 31 0b 30 09 06 03 55 04 0a 13 02 47 6f 31 |0.1.0...U....Go1| 00000080 10 30 0e 06 03 55 04 03 13 07 47 6f 20 52 6f 6f |.0...U....Go Roo| 00000090 74 30 1e 17 0d 31 36 30 31 30 31 30 30 30 30 30 |t0...16010100000| 000000a0 30 5a 17 0d 32 35 30 31 30 31 30 30 30 30 30 30 |0Z..250101000000| 000000b0 5a 30 1a 31 0b 30 09 06 03 55 04 0a 13 02 47 6f |Z0.1.0...U....Go| 000000c0 31 0b 30 09 06 03 55 04 03 13 02 47 6f 30 81 9f |1.0...U....Go0..| 000000d0 30 0d 06 09 2a 86 48 86 f7 0d 01 01 01 05 00 03 |0...*.H.........| 000000e0 81 8d 00 30 81 89 02 81 81 00 db 46 7d 93 2e 12 |...0.......F}...| 000000f0 27 06 48 bc 06 28 21 ab 7e c4 b6 a2 5d fe 1e 52 |'.H..(!.~...]..R| 00000100 45 88 7a 36 47 a5 08 0d 92 42 5b c2 81 c0 be 97 |E.z6G....B[.....| 00000110 79 98 40 fb 4f 6d 14 fd 2b 13 8b c2 a5 2e 67 d8 |y.@.Om..+.....g.| 00000120 d4 09 9e d6 22 38 b7 4a 0b 74 73 2b c2 34 f1 d1 |...."8.J.ts+.4..| 00000130 93 e5 96 d9 74 7b f3 58 9f 6c 61 3c c0 b0 41 d4 |....t{.X.la<..A.| 00000140 d9 2b 2b 24 23 77 5b 1c 3b bd 75 5d ce 20 54 cf |.++$#w[.;.u]. T.| 00000150 a1 63 87 1d 1e 24 c4 f3 1d 1a 50 8b aa b6 14 43 |.c...$....P....C| 00000160 ed 97 a7 75 62 f4 14 c8 52 d7 02 03 01 00 01 a3 |...ub...R.......| 00000170 81 93 30 81 90 30 0e 06 03 55 1d 0f 01 01 ff 04 |..0..0...U......| 00000180 04 03 02 05 a0 30 1d 06 03 55 1d 25 04 16 30 14 |.....0...U.%..0.| 00000190 06 08 2b 06 01 05 05 07 03 01 06 08 2b 06 01 05 |..+.........+...| 000001a0 05 07 03 02 30 0c 06 03 55 1d 13 01 01 ff 04 02 |....0...U.......| 000001b0 30 00 30 19 06 03 55 1d 0e 04 12 04 10 9f 91 16 |0.0...U.........| 000001c0 1f 43 43 3e 49 a6 de 6d b6 80 d7 9f 60 30 1b 06 |.CC>I..m....`0..| 000001d0 03 55 1d 23 04 14 30 12 80 10 48 13 49 4d 13 7e |.U.#..0...H.IM.~| 000001e0 16 31 bb a3 01 d5 ac ab 6e 7b 30 19 06 03 55 1d |.1......n{0...U.| 000001f0 11 04 12 30 10 82 0e 65 78 61 6d 70 6c 65 2e 67 |...0...example.g| 00000200 6f 6c 61 6e 67 30 0d 06 09 2a 86 48 86 f7 0d 01 |olang0...*.H....| 00000210 01 0b 05 00 03 81 81 00 9d 30 cc 40 2b 5b 50 a0 |.........0.@+[P.| 00000220 61 cb ba e5 53 58 e1 ed 83 28 a9 58 1a a9 38 a4 |a...SX...(.X..8.| 00000230 95 a1 ac 31 5a 1a 84 66 3d 43 d3 2d d9 0b f2 97 |...1Z..f=C.-....| 00000240 df d3 20 64 38 92 24 3a 00 bc cf 9c 7d b7 40 20 |.. 
d8.$:....}.@ | 00000250 01 5f aa d3 16 61 09 a2 76 fd 13 c3 cc e1 0c 5c |._...a..v......\| 00000260 ee b1 87 82 f1 6c 04 ed 73 bb b3 43 77 8d 0c 1c |.....l..s..Cw...| 00000270 f1 0f a1 d8 40 83 61 c9 4c 72 2b 9d ae db 46 06 |....@.a.Lr+...F.| 00000280 06 4d f4 c1 b3 3e c0 d1 bd 42 d4 db fe 3d 13 60 |.M...>...B...=.`| 00000290 84 5c 21 d3 3b e9 fa e7 16 03 03 00 23 0d 00 00 |.\!.;.......#...| 000002a0 1f 02 01 40 00 18 08 04 04 03 08 07 08 05 08 06 |...@............| 000002b0 04 01 05 01 06 01 05 03 06 03 02 01 02 03 00 00 |................| 000002c0 16 03 03 00 04 0e 00 00 00 |.........| >>> Flow 3 (client to server) 00000000 16 03 03 01 fd 0b 00 01 f9 00 01 f6 00 01 f3 30 |...............0| 00000010 82 01 ef 30 82 01 58 a0 03 02 01 02 02 10 5c 19 |...0..X.......\.| 00000020 c1 89 65 83 55 6f dc 0b c9 b9 93 9f e9 bc 30 0d |..e.Uo........0.| 00000030 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00 30 12 31 |..*.H........0.1| 00000040 10 30 0e 06 03 55 04 0a 13 07 41 63 6d 65 20 43 |.0...U....Acme C| 00000050 6f 30 1e 17 0d 31 36 30 38 31 37 32 31 35 32 33 |o0...16081721523| 00000060 31 5a 17 0d 31 37 30 38 31 37 32 31 35 32 33 31 |1Z..170817215231| 00000070 5a 30 12 31 10 30 0e 06 03 55 04 0a 13 07 41 63 |Z0.1.0...U....Ac| 00000080 6d 65 20 43 6f 30 81 9f 30 0d 06 09 2a 86 48 86 |me Co0..0...*.H.| 00000090 f7 0d 01 01 01 05 00 03 81 8d 00 30 81 89 02 81 |...........0....| 000000a0 81 00 ba 6f aa 86 bd cf bf 9f f2 ef 5c 94 60 78 |...o........\.`x| 000000b0 6f e8 13 f2 d1 96 6f cd d9 32 6e 22 37 ce 41 f9 |o.....o..2n"7.A.| 000000c0 ca 5d 29 ac e1 27 da 61 a2 ee 81 cb 10 c7 df 34 |.])..'.a.......4| 000000d0 58 95 86 e9 3d 19 e6 5c 27 73 60 c8 8d 78 02 f4 |X...=..\'s`..x..| 000000e0 1d a4 98 09 a3 19 70 69 3c 25 62 66 2a ab 22 23 |......pi<%bf*."#| 000000f0 c5 7b 85 38 4f 2e 09 73 32 a7 bd 3e 9b ad ca 84 |.{.8O..s2..>....| 00000100 07 e6 0f 3a ff 77 c5 9d 41 85 00 8a b6 9b ee b0 |...:.w..A.......| 00000110 a4 3f 2d 4c 4c e6 42 3e bb 51 c8 dd 48 54 f4 0c |.?-LL.B>.Q..HT..| 00000120 8e 47 02 03 01 00 01 a3 46 30 44 30 0e 06 03 55 |.G......F0D0...U| 00000130 1d 0f 01 01 ff 04 04 03 02 05 a0 30 13 06 03 55 |...........0...U| 00000140 1d 25 04 0c 30 0a 06 08 2b 06 01 05 05 07 03 01 |.%..0...+.......| 00000150 30 0c 06 03 55 1d 13 01 01 ff 04 02 30 00 30 0f |0...U.......0.0.| 00000160 06 03 55 1d 11 04 08 30 06 87 04 7f 00 00 01 30 |..U....0.......0| 00000170 0d 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00 03 81 |...*.H..........| 00000180 81 00 46 ab 44 a2 fb 28 54 f8 5a 67 f8 62 94 f1 |..F.D..(T.Zg.b..| 00000190 9a b2 18 9e f2 b1 de 1d 7e 6f 76 95 a9 ba e7 5d |........~ov....]| 000001a0 a8 16 6c 9c f7 09 d3 37 e4 4b 2b 36 7c 01 ad 41 |..l....7.K+6|..A| 000001b0 d2 32 d8 c3 d2 93 f9 10 6b 8e 95 b9 2c 17 8a a3 |.2......k...,...| 000001c0 44 48 bc 59 13 83 16 04 88 a4 81 5c 25 0d 98 0c |DH.Y.......\%...| 000001d0 ac 11 b1 28 56 be 1d cd 61 62 84 09 bf d6 80 c6 |...(V...ab......| 000001e0 45 8d 82 2c b4 d8 83 9b db c9 22 b7 2a 12 11 7b |E..,......".*..{| 000001f0 fa 02 3b c1 c9 ff ea c9 9d a8 49 d3 95 d7 d5 0e |..;.......I.....| 00000200 e5 35 16 03 03 00 86 10 00 00 82 00 80 29 51 da |.5...........)Q.| 00000210 8e 5c 3e fb 44 8a 0f 97 42 23 8b e2 73 cc e2 90 |.\>.D...B#..s...| 00000220 11 c4 98 01 e9 60 96 9e a9 96 30 c5 95 f8 56 0e |.....`....0...V.| 00000230 4a 2e 77 e7 7e 23 b7 49 31 c4 87 c5 69 c6 ca 6f |J.w.~#.I1...i..o| 00000240 ea 53 41 b4 2e 1e f6 0b 33 f5 e1 40 69 c0 91 6f |.SA.....3..@i..o| 00000250 88 c1 68 c8 18 99 6e fe b3 5f 9b ee f1 4a 76 41 |..h...n.._...JvA| 00000260 1f d1 05 f5 39 76 61 e6 a6 ea 75 
0e 50 32 a1 19 |....9va...u.P2..| 00000270 20 6a 4c 5d 62 6e 2a 6e af f9 9c 38 b6 3a bc 86 | jL]bn*n...8.:..| 00000280 eb ac 6d d3 b5 48 30 11 4d 98 2e 61 34 16 03 03 |..m..H0.M..a4...| 00000290 00 88 0f 00 00 84 08 04 00 80 82 ed 3f da b5 50 |............?..P| 000002a0 d2 50 51 14 cf ee f7 b9 7b a9 0c 77 2f 88 42 0a |.PQ.....{..w/.B.| 000002b0 34 a9 5d e7 32 26 3a 28 87 49 fb c4 83 31 68 c6 |4.].2&:(.I...1h.| 000002c0 0d 32 d4 31 0a d1 d6 1e 6f 7f 89 93 bf b7 7c c7 |.2.1....o.....|.| 000002d0 95 f8 c3 69 d8 58 4e e4 76 07 36 84 b7 c3 e7 22 |...i.XN.v.6...."| 000002e0 01 4c 59 ae 89 95 bb e0 07 e0 31 6a e2 95 4c d4 |.LY.......1j..L.| 000002f0 01 54 9d 27 82 60 31 13 39 07 47 c2 0c 08 5c d4 |.T.'.`1.9.G...\.| 00000300 03 5a 6f d7 89 a0 67 5e 2d a0 11 03 bf 0e 35 d8 |.Zo...g^-.....5.| 00000310 d0 78 2f 1e d8 15 47 ce c9 d3 14 03 03 00 01 01 |.x/...G.........| 00000320 16 03 03 00 40 d0 0a 0e 93 dd 9a 51 4f a9 7f 5f |....@......QO.._| 00000330 93 a6 60 a6 f2 10 f1 bd bd ae 13 5d 11 b7 0d 1a |..`........]....| 00000340 3d 1e f3 0c b7 53 7c 10 ed fa 8c d7 3f 20 ec f2 |=....S|.....? ..| 00000350 7d e9 15 87 3d d3 05 21 3a bc a5 54 fa 40 3b 53 |}...=..!:..T.@;S| 00000360 41 7c ea c6 28 |A|..(| >>> Flow 4 (server to client) 00000000 14 03 03 00 01 01 16 03 03 00 40 00 00 00 00 00 |..........@.....| 00000010 00 00 00 00 00 00 00 00 00 00 00 a8 8e 30 08 f0 |.............0..| 00000020 87 7b 13 31 99 6d 7e 9a 9b 03 d3 6f 84 d8 d9 31 |.{.1.m~....o...1| 00000030 2b d2 aa d4 0e ae 6e 72 03 ac e7 7e 5c 22 cc ac |+.....nr...~\"..| 00000040 33 b5 df 04 b2 4a 2b 6f bb a1 6f 17 03 03 00 40 |3....J+o..o....@| 00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000060 50 9c 81 04 9b 1d 61 8a 30 9c 18 68 c7 e1 c9 f3 |P.....a.0..h....| 00000070 70 f0 1b b6 4a dd fc c7 e3 e3 20 e2 4d 6f 9f bf |p...J..... .Mo..| 00000080 17 b0 5e 5b 45 73 29 1e d4 30 b4 03 ca 8e 69 63 |..^[Es)..0....ic| 00000090 15 03 03 00 30 00 00 00 00 00 00 00 00 00 00 00 |....0...........| 000000a0 00 00 00 00 00 28 ca 6a 4c 1b 3c 11 61 ce b2 58 |.....(.jL.<.a..X| 000000b0 94 e7 e4 7d c5 ce 51 03 c4 ae b5 4c 33 0b 3c 95 |...}..Q....L3.<.| 000000c0 ec b1 65 ea da |..e..|
go/src/crypto/tls/testdata/Server-TLSv12-ClientAuthRequestedAndGiven/0
{ "file_path": "go/src/crypto/tls/testdata/Server-TLSv12-ClientAuthRequestedAndGiven", "repo_id": "go", "token_count": 4693 }
224
>>> Flow 1 (client to server) 00000000 16 03 01 01 0a 01 00 01 06 03 03 2d b6 ca ea 39 |...........-...9| 00000010 59 17 86 df 90 2f 73 e0 a0 5c 6e 28 09 78 69 d6 |Y..../s..\n(.xi.| 00000020 30 06 b7 7b 17 a9 79 30 2a d8 57 20 c5 5c ed 86 |0..{..y0*.W .\..| 00000030 15 f4 3b c8 d2 5f 7a 80 2a 6a cd 40 c2 da 6f a8 |..;.._z.*j.@..o.| 00000040 cd d7 e7 bf 48 bd fb a1 e9 4b 9b a9 00 04 00 2f |....H....K...../| 00000050 00 ff 01 00 00 b9 00 23 00 79 00 00 00 00 00 00 |.......#.y......| 00000060 00 00 00 00 00 00 00 00 00 00 94 6f 2d b0 ac 51 |...........o-..Q| 00000070 ed 14 ef 68 ca 42 c5 4c 85 f6 26 0d a4 ad a8 f5 |...h.B.L..&.....| 00000080 14 64 4f b9 c3 fb 1e 55 c1 1f c7 31 57 72 68 db |.dO....U...1Wrh.| 00000090 03 37 a8 c9 07 f4 ca 62 6c 5c f3 8b 5a 3d 76 dd |.7.....bl\..Z=v.| 000000a0 63 ea 68 61 6b a1 2d 95 49 38 16 7e 51 5c e5 15 |c.hak.-.I8.~Q\..| 000000b0 c0 58 7d c5 67 4a 6f 64 b6 79 1a 41 9b b1 33 15 |.X}.gJod.y.A..3.| 000000c0 38 74 92 5c a5 48 c3 f2 94 bb 33 ec af cf d7 e7 |8t.\.H....3.....| 000000d0 c9 3e 35 00 16 00 00 00 17 00 00 00 0d 00 30 00 |.>5...........0.| 000000e0 2e 04 03 05 03 06 03 08 07 08 08 08 09 08 0a 08 |................| 000000f0 0b 08 04 08 05 08 06 04 01 05 01 06 01 03 03 02 |................| 00000100 03 03 01 02 01 03 02 02 02 04 02 05 02 06 02 |...............| >>> Flow 2 (server to client) 00000000 16 03 03 00 59 02 00 00 55 03 03 00 00 00 00 00 |....Y...U.......| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 44 4f 57 4e 47 52 44 01 20 c5 5c ed 86 |...DOWNGRD. .\..| 00000030 15 f4 3b c8 d2 5f 7a 80 2a 6a cd 40 c2 da 6f a8 |..;.._z.*j.@..o.| 00000040 cd d7 e7 bf 48 bd fb a1 e9 4b 9b a9 00 2f 00 00 |....H....K.../..| 00000050 0d 00 23 00 00 ff 01 00 01 00 00 17 00 00 16 03 |..#.............| 00000060 03 00 83 04 00 00 7f 00 00 00 00 00 79 00 00 00 |............y...| 00000070 00 00 00 00 00 00 00 00 00 00 00 00 00 94 6f 2d |..............o-| 00000080 b0 ac 51 ed 14 ef 68 ca 42 c5 4c 85 f6 26 0d a4 |..Q...h.B.L..&..| 00000090 ad a8 f5 14 64 4f b9 c3 fb 1e 55 c1 1f c7 31 57 |....dO....U...1W| 000000a0 72 68 db 03 37 a8 c9 07 f4 ca 62 6c 5c f3 8b 5a |rh..7.....bl\..Z| 000000b0 3d 76 dd 63 ea 68 61 6b a1 2d 95 49 38 16 7e 51 |=v.c.hak.-.I8.~Q| 000000c0 5c e5 15 c0 58 7d c5 67 4a 6f 64 b6 79 1a 41 9b |\...X}.gJod.y.A.| 000000d0 b1 33 15 38 74 92 5c a5 48 c3 f2 94 bb 33 ec af |.3.8t.\.H....3..| 000000e0 cf d7 e7 c9 3e 35 14 03 03 00 01 01 16 03 03 00 |....>5..........| 000000f0 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |@...............| 00000100 00 47 68 3a 66 5b d6 ed b7 60 a9 fb e8 37 d6 9d |.Gh:f[...`...7..| 00000110 a6 b9 4d d5 f3 9f 0f c6 3c 21 6e d5 80 08 a8 34 |..M.....<!n....4| 00000120 34 da a3 1f b0 20 28 e4 2f 2b 6b c6 3e dc ac 6b |4.... 
(./+k.>..k| 00000130 40 |@| >>> Flow 3 (client to server) 00000000 14 03 03 00 01 01 16 03 03 00 40 46 34 b3 97 54 |..........@F4..T| 00000010 20 5b 95 f3 22 f8 a1 89 c8 95 93 ba 7b a4 a8 8f | [..".......{...| 00000020 46 a8 d6 c1 b3 ac f0 e0 49 3d 8d e4 1c ac b8 a4 |F.......I=......| 00000030 01 21 5e d8 f0 f5 10 10 f7 de 8b 33 9d 94 cf f6 |.!^........3....| 00000040 f2 9b 39 22 5c e6 c0 5e b4 1d cd |..9"\..^...| >>> Flow 4 (server to client) 00000000 17 03 03 00 40 00 00 00 00 00 00 00 00 00 00 00 |....@...........| 00000010 00 00 00 00 00 5c bc 45 06 2e d3 7b 30 99 a6 af |.....\.E...{0...| 00000020 64 0e 63 93 73 6f 0a e7 a4 1d ac 94 25 11 a5 63 |d.c.so......%..c| 00000030 8d b2 44 aa 98 44 f8 b5 51 ea 2c fb 26 99 f6 a4 |..D..D..Q.,.&...| 00000040 2c f8 15 c3 90 15 03 03 00 30 00 00 00 00 00 00 |,........0......| 00000050 00 00 00 00 00 00 00 00 00 00 c6 58 8e 7c 97 de |...........X.|..| 00000060 3b b8 39 cd 7b 1d 67 77 27 da 93 39 52 a7 81 9b |;.9.{.gw'..9R...| 00000070 ab 5a bc e9 00 1a 64 3a ca f5 |.Z....d:..|
go/src/crypto/tls/testdata/Server-TLSv12-Resume/0
{ "file_path": "go/src/crypto/tls/testdata/Server-TLSv12-Resume", "repo_id": "go", "token_count": 1950 }
225
>>> Flow 1 (client to server) 00000000 16 03 01 00 ca 01 00 00 c6 03 03 4a ea 7e 77 82 |...........J.~w.| 00000010 17 37 46 db 49 14 d2 41 e4 14 b0 46 20 9d 50 45 |.7F.I..A...F .PE| 00000020 d1 75 08 82 2b 8d bc 9a 75 e3 71 20 ce 77 9a 79 |.u..+...u.q .w.y| 00000030 98 24 bc 15 be ac 30 fe 66 35 ab 51 be bd b4 fa |.$....0.f5.Q....| 00000040 6f 53 1f e9 5f 54 58 75 ce 94 f9 47 00 04 13 01 |oS.._TXu...G....| 00000050 00 ff 01 00 00 79 00 0b 00 04 03 00 01 02 00 0a |.....y..........| 00000060 00 0c 00 0a 00 1d 00 17 00 1e 00 19 00 18 00 16 |................| 00000070 00 00 00 17 00 00 00 0d 00 1e 00 1c 04 03 05 03 |................| 00000080 06 03 08 07 08 08 08 09 08 0a 08 0b 08 04 08 05 |................| 00000090 08 06 04 01 05 01 06 01 00 2b 00 03 02 03 04 00 |.........+......| 000000a0 2d 00 02 01 01 00 33 00 26 00 24 00 1d 00 20 e0 |-.....3.&.$... .| 000000b0 64 7e 58 b6 e7 32 fc c9 d6 3e f7 e0 f5 6a 9c 3a |d~X..2...>...j.:| 000000c0 e6 8f 83 d7 1c 27 62 72 71 06 71 de 49 96 05 |.....'brq.q.I..| >>> Flow 2 (server to client) 00000000 16 03 03 00 7a 02 00 00 76 03 03 00 00 00 00 00 |....z...v.......| 00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 00000020 00 00 00 00 00 00 00 00 00 00 00 20 ce 77 9a 79 |........... .w.y| 00000030 98 24 bc 15 be ac 30 fe 66 35 ab 51 be bd b4 fa |.$....0.f5.Q....| 00000040 6f 53 1f e9 5f 54 58 75 ce 94 f9 47 13 01 00 00 |oS.._TXu...G....| 00000050 2e 00 2b 00 02 03 04 00 33 00 24 00 1d 00 20 2f |..+.....3.$... /| 00000060 e5 7d a3 47 cd 62 43 15 28 da ac 5f bb 29 07 30 |.}.G.bC.(.._.).0| 00000070 ff f6 84 af c4 cf c2 ed 90 99 5f 58 cb 3b 74 14 |.........._X.;t.| 00000080 03 03 00 01 01 17 03 03 00 17 d0 8f 0a 7e 6c 33 |.............~l3| 00000090 0b be 2a 24 0e fc af df 6e 7d ad 22 28 82 77 60 |..*$....n}."(.w`| 000000a0 3d 17 03 03 00 3e 8f 87 8f f1 5a f6 6f eb eb d9 |=....>....Z.o...| 000000b0 da aa fc 3c 6d ac 73 94 a3 13 5f fe 01 bb 75 eb |...<m.s..._...u.| 000000c0 ce d5 75 68 39 3c 0b 3c 5d ab 3f dc eb 6d 30 79 |..uh9<.<].?..m0y| 000000d0 25 35 a8 2b 72 57 e4 9f 6f 4a 1e 03 74 eb 04 f9 |%5.+rW..oJ..t...| 000000e0 c4 4f c1 46 17 03 03 02 6d 50 d5 4a 67 1e a7 43 |.O.F....mP.Jg..C| 000000f0 50 f5 b9 55 8d 57 a3 0e e4 b0 ae 6c 5b ca f9 d7 |P..U.W.....l[...| 00000100 f7 22 34 92 72 76 a4 5b c4 c7 0a 75 cc 97 0c 03 |."4.rv.[...u....| 00000110 37 25 04 65 a6 fc da a8 fb 8c ef f1 25 d6 4b f6 |7%.e........%.K.| 00000120 ed d5 80 2b 10 b2 0b 31 f5 33 d7 df a7 9b c7 6e |...+...1.3.....n| 00000130 52 f6 ed ac e0 20 1e f2 93 11 b7 bd b0 0d 5e 41 |R.... ........^A| 00000140 be e4 b5 e4 a1 6d 21 45 27 1d 5e 65 fe 2c e9 f4 |.....m!E'.^e.,..| 00000150 97 5c 45 5b 23 5a 8c dc 29 64 61 28 48 cf ae 3f |.\E[#Z..)da(H..?| 00000160 2c 21 b1 68 9e 09 74 76 77 65 f4 85 07 dc f3 0e |,!.h..tvwe......| 00000170 27 97 4d 60 47 35 31 13 92 15 90 f4 ca 24 84 6e |'.M`G51......$.n| 00000180 da 91 13 a3 37 54 4e 52 d4 9c 2f 82 6e 76 fd d7 |....7TNR../.nv..| 00000190 81 29 fa 76 ac be 32 04 0d 7e d0 09 b0 47 ff 6d |.).v..2..~...G.m| 000001a0 37 32 6d 2c 8e d3 06 94 da 3f 20 87 56 f8 4a 2f |72m,.....? 
.V.J/| 000001b0 d6 75 4d 90 5c c1 e5 f6 82 c4 3c d9 da a2 95 0a |.uM.\.....<.....| 000001c0 4c 36 59 a9 10 50 a0 0d 61 06 4f 65 6e 64 bb 29 |L6Y..P..a.Oend.)| 000001d0 42 9f 62 81 65 54 22 cb 4c a5 95 1b 2a 49 18 2f |B.b.eT".L...*I./| 000001e0 fc 54 b4 38 27 3f cf ab b0 97 3e 17 8b ae 98 a0 |.T.8'?....>.....| 000001f0 50 76 d6 79 39 e8 25 bc 3b d9 5a a4 a8 5e 08 6a |Pv.y9.%.;.Z..^.j| 00000200 1c 48 b1 11 f0 d9 b9 48 39 e1 23 db 41 8c bf bd |.H.....H9.#.A...| 00000210 20 27 7b 0c 89 10 1f b0 ae 70 18 9a 7f f2 b4 1d | '{......p......| 00000220 20 cd 6d 80 38 00 a4 33 de 22 ef f6 42 52 c7 66 | .m.8..3."..BR.f| 00000230 83 4a 67 18 6b a6 38 27 d3 40 cf a2 a9 3e 58 06 |.Jg.k.8'.@...>X.| 00000240 91 a7 36 08 29 10 4d 8f 1b f9 7c 5a 17 05 81 b9 |..6.).M...|Z....| 00000250 4b 60 48 40 49 73 63 8a ef 9f f2 9e 80 85 57 fa |K`H@Isc.......W.| 00000260 0a b8 72 83 f3 26 fa 07 ae d2 47 99 b5 a6 5d c5 |..r..&....G...].| 00000270 1e b5 fc ea 0f 17 f8 ba e2 5c 7d 59 70 53 2e 23 |.........\}YpS.#| 00000280 f7 55 75 cb de 82 dc ca b1 bf 3f 5f 7d 7d 92 3c |.Uu.......?_}}.<| 00000290 29 29 64 30 74 0a 01 0b c0 95 db 45 fe 20 be 38 |))d0t......E. .8| 000002a0 c5 87 b7 e4 a9 93 63 67 6b 9a 2f 24 9e 62 8f 1d |......cgk./$.b..| 000002b0 bd 8c 4a d4 b0 0f 95 2f 56 b2 1c e8 5a 58 81 2e |..J..../V...ZX..| 000002c0 b5 b5 b5 f2 1b 7f 6c 39 58 75 51 dc 83 2a 59 0b |......l9XuQ..*Y.| 000002d0 78 5e 22 7e af ee 59 af b9 8f dc 65 97 af a5 b5 |x^"~..Y....e....| 000002e0 b8 50 af 35 51 30 e7 0a 75 e2 d0 33 e2 fb f4 b1 |.P.5Q0..u..3....| 000002f0 99 cd 5f 72 6b a9 f8 85 a1 a5 19 7e 2b 91 01 19 |.._rk......~+...| 00000300 09 dd 88 6e a7 d6 54 57 cd ef d0 97 6a 68 d9 6e |...n..TW....jh.n| 00000310 52 38 ef a5 fa 84 63 70 f0 6d 64 ec 66 1a c9 b5 |R8....cp.md.f...| 00000320 78 ba 17 74 f4 b4 2b a2 fe 9a 7f 38 b8 5e 8b 56 |x..t..+....8.^.V| 00000330 a6 7b 2c 92 7f db 58 c8 fa f9 2d 6b 00 25 dc 0a |.{,...X...-k.%..| 00000340 aa 13 e8 40 f3 fd 47 23 f6 bf 1c 30 fc 91 18 95 |...@..G#...0....| 00000350 ac a8 82 3d f5 ef 17 03 03 00 99 7e 30 4f f1 00 |...=.......~0O..| 00000360 1e dd eb c6 54 d2 f5 f7 21 aa 6b b0 83 0c fa 8b |....T...!.k.....| 00000370 12 af ac 15 3e 54 b6 1c 85 9b 0c 80 02 d8 e3 5f |....>T........._| 00000380 36 57 64 fe 7a b8 31 d0 aa 59 f1 e6 af e0 27 c5 |6Wd.z.1..Y....'.| 00000390 b8 d8 2f ab e0 cc c3 02 18 73 30 36 b5 2a 0d 12 |../......s06.*..| 000003a0 a4 45 e6 c3 79 9f 54 cb 51 61 1a b8 aa 87 45 43 |.E..y.T.Qa....EC| 000003b0 8e 93 58 66 f2 97 cb 3b 44 df ae 93 17 de 22 99 |..Xf...;D.....".| 000003c0 3c b4 9d 21 a6 db 03 ce 7b fb 67 b9 6e fb ab 50 |<..!....{.g.n..P| 000003d0 f8 33 9f 20 a0 fb e9 54 bb 62 16 19 d6 df 8c fe |.3. 
...T.b......| 000003e0 3d 63 42 7c b0 72 2b 0d 87 1e f7 7d bb 59 ba f5 |=cB|.r+....}.Y..| 000003f0 d6 e8 f3 57 17 03 03 00 35 9e 6f 39 92 2e 32 10 |...W....5.o9..2.| 00000400 03 cd f0 28 2c 1a 32 77 19 c8 39 38 60 0c 28 b7 |...(,.2w..98`.(.| 00000410 8c 3a d8 50 a1 44 e4 d6 c5 64 59 88 2d a4 23 c9 |.:.P.D...dY.-.#.| 00000420 26 d1 96 0c c9 5d da 84 3e 8a 7d fe 80 77 |&....]..>.}..w| >>> Flow 3 (client to server) 00000000 14 03 03 00 01 01 17 03 03 00 19 87 49 99 a6 d9 |............I...| 00000010 ed cd f7 7a 75 14 e1 26 41 d2 6e 5c 79 a6 be 7c |...zu..&A.n\y..|| 00000020 5d 9d 44 36 17 03 03 00 35 1b 51 a9 b1 ce 11 ed |].D6....5.Q.....| 00000030 95 47 34 b9 3d 2f 6e 27 b2 e5 31 54 7f e3 8a 11 |.G4.=/n'..1T....| 00000040 fd 54 75 2c b6 8a 56 25 00 29 a7 5f 7a 1e 16 be |.Tu,..V%.)._z...| 00000050 16 e3 86 3a 72 84 0e bc 40 ef fd ad 18 33 |...:r...@....3| >>> Flow 4 (server to client) 00000000 17 03 03 00 8b 69 2e 81 c4 4d 43 a6 1f 96 b7 8e |.....i...MC.....| 00000010 87 4a 9b be 48 3c 31 18 98 f4 8c 04 24 b2 52 96 |.J..H<1.....$.R.| 00000020 04 b5 12 7c 54 37 83 6d 51 42 c6 52 f4 a5 bc d3 |...|T7.mQB.R....| 00000030 d1 c8 29 ab 4f e4 02 da 74 ec 8e 13 ad 03 f3 e0 |..).O...t.......| 00000040 7f 44 58 6b c7 28 a5 6a 75 30 b8 b1 31 38 fe ba |.DXk.(.ju0..18..| 00000050 e7 27 ae b3 e7 cb 5e 78 24 82 03 61 ba ae dd 4c |.'....^x$..a...L| 00000060 c6 7b f3 45 cf 6f a8 dc 7d 5d 73 65 db ae dc 10 |.{.E.o..}]se....| 00000070 ff 32 dc 4c b4 5e dc ce 4c 34 37 83 a0 0c d5 20 |.2.L.^..L47.... | 00000080 f1 f6 81 42 bc 63 65 47 80 d0 d6 f3 08 aa e2 58 |...B.ceG.......X| 00000090 17 03 03 00 1e 85 84 f3 e4 0f a8 24 c0 fe fa 2c |...........$...,| 000000a0 8b 60 52 32 73 2b 95 e9 37 3d 1c bd 2f ee ff e2 |.`R2s+..7=../...| 000000b0 70 13 df 17 03 03 00 13 65 2b 11 5f 50 7c 11 eb |p.......e+._P|..| 000000c0 3b 06 75 23 28 13 ca 4a b5 fb dc |;.u#(..J...|
go/src/crypto/tls/testdata/Server-TLSv13-ClientAuthRequestedNotGiven/0
{ "file_path": "go/src/crypto/tls/testdata/Server-TLSv13-ClientAuthRequestedNotGiven", "repo_id": "go", "token_count": 3989 }
226
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49
AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q
EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA==
-----END EC PRIVATE KEY-----
go/src/crypto/tls/testdata/example-key.pem/0
{ "file_path": "go/src/crypto/tls/testdata/example-key.pem", "repo_id": "go", "token_count": 143 }
227
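For context on the record above: example-key.pem holds a SEC 1 "EC PRIVATE KEY" block, the form that crypto/x509's ParseECPrivateKey accepts. A minimal sketch of loading such a file follows; the file path and the printed field are illustrative, not taken from the test suite.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	// "example-key.pem" is assumed to be a local copy of the block above.
	pemBytes, err := os.ReadFile("example-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "EC PRIVATE KEY" {
		log.Fatal("no EC PRIVATE KEY block found")
	}

	// ParseECPrivateKey handles SEC 1, ASN.1 DER form EC keys.
	key, err := x509.ParseECPrivateKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("curve:", key.Curve.Params().Name)
}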
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || netbsd || openbsd

package x509

// Possible certificate files; stop after finding one.
var certFiles = []string{
	"/usr/local/etc/ssl/cert.pem",            // FreeBSD
	"/etc/ssl/cert.pem",                      // OpenBSD
	"/usr/local/share/certs/ca-root-nss.crt", // DragonFly
	"/etc/openssl/certs/ca-certificates.crt", // NetBSD
}

// Possible directories with certificate files; all will be read.
var certDirectories = []string{
	"/etc/ssl/certs",         // FreeBSD 12.2+
	"/usr/local/share/certs", // FreeBSD
	"/etc/openssl/certs",     // NetBSD
}
go/src/crypto/x509/root_bsd.go/0
{ "file_path": "go/src/crypto/x509/root_bsd.go", "repo_id": "go", "token_count": 288 }
228
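The certFiles and certDirectories lists in root_bsd.go above are the locations consulted when the package builds the system root pool on the BSDs; application code normally reaches that machinery through x509.SystemCertPool rather than these variables. A minimal sketch, with the final print purely illustrative:

package main

import (
	"crypto/x509"
	"fmt"
	"log"
)

func main() {
	// On the BSDs this ends up reading candidates such as the
	// certFiles/certDirectories entries listed above.
	pool, err := x509.SystemCertPool()
	if err != nil {
		log.Fatal(err)
	}
	// The pool is typically installed as the RootCAs of a tls.Config.
	fmt.Println("system root pool loaded:", pool != nil)
}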
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package x509 implements a subset of the X.509 standard. // // It allows parsing and generating certificates, certificate signing // requests, certificate revocation lists, and encoded public and private keys. // It provides a certificate verifier, complete with a chain builder. // // The package targets the X.509 technical profile defined by the IETF (RFC // 2459/3280/5280), and as further restricted by the CA/Browser Forum Baseline // Requirements. There is minimal support for features outside of these // profiles, as the primary goal of the package is to provide compatibility // with the publicly trusted TLS certificate ecosystem and its policies and // constraints. // // On macOS and Windows, certificate verification is handled by system APIs, but // the package aims to apply consistent validation rules across operating // systems. package x509 import ( "bytes" "crypto" "crypto/ecdh" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" "crypto/rsa" "crypto/sha1" "crypto/x509/pkix" "encoding/asn1" "encoding/pem" "errors" "fmt" "internal/godebug" "io" "math/big" "net" "net/url" "strconv" "time" "unicode" // Explicitly import these for their crypto.RegisterHash init side-effects. // Keep these as blank imports, even if they're imported above. _ "crypto/sha1" _ "crypto/sha256" _ "crypto/sha512" "golang.org/x/crypto/cryptobyte" cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" ) // pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo // in RFC 3280. type pkixPublicKey struct { Algo pkix.AlgorithmIdentifier BitString asn1.BitString } // ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form. The encoded // public key is a SubjectPublicKeyInfo structure (see RFC 5280, Section 4.1). // // It returns a *[rsa.PublicKey], *[dsa.PublicKey], *[ecdsa.PublicKey], // [ed25519.PublicKey] (not a pointer), or *[ecdh.PublicKey] (for X25519). // More types might be supported in the future. // // This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". func ParsePKIXPublicKey(derBytes []byte) (pub any, err error) { var pki publicKeyInfo if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil { if _, err := asn1.Unmarshal(derBytes, &pkcs1PublicKey{}); err == nil { return nil, errors.New("x509: failed to parse public key (use ParsePKCS1PublicKey instead for this key format)") } return nil, err } else if len(rest) != 0 { return nil, errors.New("x509: trailing data after ASN.1 of public-key") } return parsePublicKey(&pki) } func marshalPublicKey(pub any) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { switch pub := pub.(type) { case *rsa.PublicKey: publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ N: pub.N, E: pub.E, }) if err != nil { return nil, pkix.AlgorithmIdentifier{}, err } publicKeyAlgorithm.Algorithm = oidPublicKeyRSA // This is a NULL parameters value which is required by // RFC 3279, Section 2.3.1. 
publicKeyAlgorithm.Parameters = asn1.NullRawValue case *ecdsa.PublicKey: oid, ok := oidFromNamedCurve(pub.Curve) if !ok { return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") } if !pub.Curve.IsOnCurve(pub.X, pub.Y) { return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: invalid elliptic curve public key") } publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA var paramBytes []byte paramBytes, err = asn1.Marshal(oid) if err != nil { return } publicKeyAlgorithm.Parameters.FullBytes = paramBytes case ed25519.PublicKey: publicKeyBytes = pub publicKeyAlgorithm.Algorithm = oidPublicKeyEd25519 case *ecdh.PublicKey: publicKeyBytes = pub.Bytes() if pub.Curve() == ecdh.X25519() { publicKeyAlgorithm.Algorithm = oidPublicKeyX25519 } else { oid, ok := oidFromECDHCurve(pub.Curve()) if !ok { return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") } publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA var paramBytes []byte paramBytes, err = asn1.Marshal(oid) if err != nil { return } publicKeyAlgorithm.Parameters.FullBytes = paramBytes } default: return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub) } return publicKeyBytes, publicKeyAlgorithm, nil } // MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. // The encoded public key is a SubjectPublicKeyInfo structure // (see RFC 5280, Section 4.1). // // The following key types are currently supported: *[rsa.PublicKey], // *[ecdsa.PublicKey], [ed25519.PublicKey] (not a pointer), and *[ecdh.PublicKey]. // Unsupported key types result in an error. // // This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". func MarshalPKIXPublicKey(pub any) ([]byte, error) { var publicKeyBytes []byte var publicKeyAlgorithm pkix.AlgorithmIdentifier var err error if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil { return nil, err } pkix := pkixPublicKey{ Algo: publicKeyAlgorithm, BitString: asn1.BitString{ Bytes: publicKeyBytes, BitLength: 8 * len(publicKeyBytes), }, } ret, _ := asn1.Marshal(pkix) return ret, nil } // These structures reflect the ASN.1 structure of X.509 certificates.: type certificate struct { TBSCertificate tbsCertificate SignatureAlgorithm pkix.AlgorithmIdentifier SignatureValue asn1.BitString } type tbsCertificate struct { Raw asn1.RawContent Version int `asn1:"optional,explicit,default:0,tag:0"` SerialNumber *big.Int SignatureAlgorithm pkix.AlgorithmIdentifier Issuer asn1.RawValue Validity validity Subject asn1.RawValue PublicKey publicKeyInfo UniqueId asn1.BitString `asn1:"optional,tag:1"` SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"` Extensions []pkix.Extension `asn1:"omitempty,optional,explicit,tag:3"` } type dsaAlgorithmParameters struct { P, Q, G *big.Int } type validity struct { NotBefore, NotAfter time.Time } type publicKeyInfo struct { Raw asn1.RawContent Algorithm pkix.AlgorithmIdentifier PublicKey asn1.BitString } // RFC 5280, 4.2.1.1 type authKeyId struct { Id []byte `asn1:"optional,tag:0"` } type SignatureAlgorithm int const ( UnknownSignatureAlgorithm SignatureAlgorithm = iota MD2WithRSA // Unsupported. MD5WithRSA // Only supported for signing, not verification. SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. SHA256WithRSA SHA384WithRSA SHA512WithRSA DSAWithSHA1 // Unsupported. DSAWithSHA256 // Unsupported. 
ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. ECDSAWithSHA256 ECDSAWithSHA384 ECDSAWithSHA512 SHA256WithRSAPSS SHA384WithRSAPSS SHA512WithRSAPSS PureEd25519 ) func (algo SignatureAlgorithm) isRSAPSS() bool { for _, details := range signatureAlgorithmDetails { if details.algo == algo { return details.isRSAPSS } } return false } func (algo SignatureAlgorithm) hashFunc() crypto.Hash { for _, details := range signatureAlgorithmDetails { if details.algo == algo { return details.hash } } return crypto.Hash(0) } func (algo SignatureAlgorithm) String() string { for _, details := range signatureAlgorithmDetails { if details.algo == algo { return details.name } } return strconv.Itoa(int(algo)) } type PublicKeyAlgorithm int const ( UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota RSA DSA // Only supported for parsing. ECDSA Ed25519 ) var publicKeyAlgoName = [...]string{ RSA: "RSA", DSA: "DSA", ECDSA: "ECDSA", Ed25519: "Ed25519", } func (algo PublicKeyAlgorithm) String() string { if 0 < algo && int(algo) < len(publicKeyAlgoName) { return publicKeyAlgoName[algo] } return strconv.Itoa(int(algo)) } // OIDs for signature algorithms // // pkcs-1 OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } // // RFC 3279 2.2.1 RSA Signature Algorithms // // md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } // // sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } // // dsaWithSha1 OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } // // RFC 3279 2.2.3 ECDSA Signature Algorithm // // ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) ansi-x962(10045) // signatures(4) ecdsa-with-SHA1(1)} // // RFC 4055 5 PKCS #1 Version 1.5 // // sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } // // sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } // // sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } // // RFC 5758 3.1 DSA Signature Algorithms // // dsaWithSha256 OBJECT IDENTIFIER ::= { // joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) // csor(3) algorithms(4) id-dsa-with-sha2(3) 2} // // RFC 5758 3.2 ECDSA Signature Algorithm // // ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) // us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } // // ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) // us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } // // ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) // us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } // // RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers // // id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } var ( oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} oidSignatureECDSAWithSHA384 = 
asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA // but it's specified by ISO. Microsoft's makecert.exe has been known // to produce certificates with this OID. oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} ) var signatureAlgorithmDetails = []struct { algo SignatureAlgorithm name string oid asn1.ObjectIdentifier params asn1.RawValue pubKeyAlgo PublicKeyAlgorithm hash crypto.Hash isRSAPSS bool }{ {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, asn1.NullRawValue, RSA, crypto.MD5, false}, {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, asn1.NullRawValue, RSA, crypto.SHA256, false}, {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, asn1.NullRawValue, RSA, crypto.SHA384, false}, {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, asn1.NullRawValue, RSA, crypto.SHA512, false}, {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, pssParametersSHA256, RSA, crypto.SHA256, true}, {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, pssParametersSHA384, RSA, crypto.SHA384, true}, {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, pssParametersSHA512, RSA, crypto.SHA512, true}, {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, emptyRawValue, DSA, crypto.SHA1, false}, {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, emptyRawValue, DSA, crypto.SHA256, false}, {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, emptyRawValue, ECDSA, crypto.SHA1, false}, {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, emptyRawValue, ECDSA, crypto.SHA256, false}, {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, emptyRawValue, ECDSA, crypto.SHA384, false}, {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, emptyRawValue, ECDSA, crypto.SHA512, false}, {PureEd25519, "Ed25519", oidSignatureEd25519, emptyRawValue, Ed25519, crypto.Hash(0) /* no pre-hashing */, false}, } var emptyRawValue = asn1.RawValue{} // DER encoded RSA PSS parameters for the // SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3. 
// The parameters contain the following values: // - hashAlgorithm contains the associated hash identifier with NULL parameters // - maskGenAlgorithm always contains the default mgf1SHA1 identifier // - saltLength contains the length of the associated hash // - trailerField always contains the default trailerFieldBC value var ( pssParametersSHA256 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}} pssParametersSHA384 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}} pssParametersSHA512 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}} ) // pssParameters reflects the parameters in an AlgorithmIdentifier that // specifies RSA PSS. See RFC 3447, Appendix A.2.3. type pssParameters struct { // The following three fields are not marked as // optional because the default values specify SHA-1, // which is no longer suitable for use in signatures. Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` SaltLength int `asn1:"explicit,tag:2"` TrailerField int `asn1:"optional,explicit,tag:3,default:1"` } func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm { if ai.Algorithm.Equal(oidSignatureEd25519) { // RFC 8410, Section 3 // > For all of the OIDs, the parameters MUST be absent. if len(ai.Parameters.FullBytes) != 0 { return UnknownSignatureAlgorithm } } if !ai.Algorithm.Equal(oidSignatureRSAPSS) { for _, details := range signatureAlgorithmDetails { if ai.Algorithm.Equal(details.oid) { return details.algo } } return UnknownSignatureAlgorithm } // RSA PSS is special because it encodes important parameters // in the Parameters. var params pssParameters if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil { return UnknownSignatureAlgorithm } var mgf1HashFunc pkix.AlgorithmIdentifier if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { return UnknownSignatureAlgorithm } // PSS is greatly overburdened with options. This code forces them into // three buckets by requiring that the MGF1 hash function always match the // message hash function (as recommended in RFC 3447, Section 8.1), that the // salt length matches the hash length, and that the trailer field has the // default value. 
if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || !params.MGF.Algorithm.Equal(oidMGF1) || !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || params.TrailerField != 1 { return UnknownSignatureAlgorithm } switch { case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: return SHA256WithRSAPSS case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: return SHA384WithRSAPSS case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: return SHA512WithRSAPSS } return UnknownSignatureAlgorithm } var ( // RFC 3279, 2.3 Public Key Algorithms // // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) // rsadsi(113549) pkcs(1) 1 } // // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } // // id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) // x9-57(10040) x9cm(4) 1 } oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters // // id-ecPublicKey OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} // RFC 8410, Section 3 // // id-X25519 OBJECT IDENTIFIER ::= { 1 3 101 110 } // id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } oidPublicKeyX25519 = asn1.ObjectIdentifier{1, 3, 101, 110} oidPublicKeyEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} ) // getPublicKeyAlgorithmFromOID returns the exposed PublicKeyAlgorithm // identifier for public key types supported in certificates and CSRs. Marshal // and Parse functions may support a different set of public key types. func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm { switch { case oid.Equal(oidPublicKeyRSA): return RSA case oid.Equal(oidPublicKeyDSA): return DSA case oid.Equal(oidPublicKeyECDSA): return ECDSA case oid.Equal(oidPublicKeyEd25519): return Ed25519 } return UnknownPublicKeyAlgorithm } // RFC 5480, 2.1.1.1. 
Named Curve // // secp224r1 OBJECT IDENTIFIER ::= { // iso(1) identified-organization(3) certicom(132) curve(0) 33 } // // secp256r1 OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) // prime(1) 7 } // // secp384r1 OBJECT IDENTIFIER ::= { // iso(1) identified-organization(3) certicom(132) curve(0) 34 } // // secp521r1 OBJECT IDENTIFIER ::= { // iso(1) identified-organization(3) certicom(132) curve(0) 35 } // // NB: secp256r1 is equivalent to prime256v1 var ( oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} ) func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { switch { case oid.Equal(oidNamedCurveP224): return elliptic.P224() case oid.Equal(oidNamedCurveP256): return elliptic.P256() case oid.Equal(oidNamedCurveP384): return elliptic.P384() case oid.Equal(oidNamedCurveP521): return elliptic.P521() } return nil } func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { switch curve { case elliptic.P224(): return oidNamedCurveP224, true case elliptic.P256(): return oidNamedCurveP256, true case elliptic.P384(): return oidNamedCurveP384, true case elliptic.P521(): return oidNamedCurveP521, true } return nil, false } func oidFromECDHCurve(curve ecdh.Curve) (asn1.ObjectIdentifier, bool) { switch curve { case ecdh.X25519(): return oidPublicKeyX25519, true case ecdh.P256(): return oidNamedCurveP256, true case ecdh.P384(): return oidNamedCurveP384, true case ecdh.P521(): return oidNamedCurveP521, true } return nil, false } // KeyUsage represents the set of actions that are valid for a given key. It's // a bitmap of the KeyUsage* constants. 
type KeyUsage int const ( KeyUsageDigitalSignature KeyUsage = 1 << iota KeyUsageContentCommitment KeyUsageKeyEncipherment KeyUsageDataEncipherment KeyUsageKeyAgreement KeyUsageCertSign KeyUsageCRLSign KeyUsageEncipherOnly KeyUsageDecipherOnly ) // RFC 5280, 4.2.1.12 Extended Key Usage // // anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } // // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } // id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } // id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } // id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } var ( oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} ) // ExtKeyUsage represents an extended set of actions that are valid for a given key. // Each of the ExtKeyUsage* constants define a unique action. type ExtKeyUsage int const ( ExtKeyUsageAny ExtKeyUsage = iota ExtKeyUsageServerAuth ExtKeyUsageClientAuth ExtKeyUsageCodeSigning ExtKeyUsageEmailProtection ExtKeyUsageIPSECEndSystem ExtKeyUsageIPSECTunnel ExtKeyUsageIPSECUser ExtKeyUsageTimeStamping ExtKeyUsageOCSPSigning ExtKeyUsageMicrosoftServerGatedCrypto ExtKeyUsageNetscapeServerGatedCrypto ExtKeyUsageMicrosoftCommercialCodeSigning ExtKeyUsageMicrosoftKernelCodeSigning ) // extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. 
var extKeyUsageOIDs = []struct { extKeyUsage ExtKeyUsage oid asn1.ObjectIdentifier }{ {ExtKeyUsageAny, oidExtKeyUsageAny}, {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning}, {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning}, } func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { for _, pair := range extKeyUsageOIDs { if oid.Equal(pair.oid) { return pair.extKeyUsage, true } } return } func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) { for _, pair := range extKeyUsageOIDs { if eku == pair.extKeyUsage { return pair.oid, true } } return } // A Certificate represents an X.509 certificate. type Certificate struct { Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature). RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content. RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. RawSubject []byte // DER encoded Subject RawIssuer []byte // DER encoded Issuer Signature []byte SignatureAlgorithm SignatureAlgorithm PublicKeyAlgorithm PublicKeyAlgorithm PublicKey any Version int SerialNumber *big.Int Issuer pkix.Name Subject pkix.Name NotBefore, NotAfter time.Time // Validity bounds. KeyUsage KeyUsage // Extensions contains raw X.509 extensions. When parsing certificates, // this can be used to extract non-critical extensions that are not // parsed by this package. When marshaling certificates, the Extensions // field is ignored, see ExtraExtensions. Extensions []pkix.Extension // ExtraExtensions contains extensions to be copied, raw, into any // marshaled certificates. Values override any extensions that would // otherwise be produced based on the other fields. The ExtraExtensions // field is not populated when parsing certificates, see Extensions. ExtraExtensions []pkix.Extension // UnhandledCriticalExtensions contains a list of extension IDs that // were not (fully) processed when parsing. Verify will fail if this // slice is non-empty, unless verification is delegated to an OS // library which understands all the critical extensions. // // Users can access these extensions using Extensions and can remove // elements from this slice if they believe that they have been // handled. UnhandledCriticalExtensions []asn1.ObjectIdentifier ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages. UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package. // BasicConstraintsValid indicates whether IsCA, MaxPathLen, // and MaxPathLenZero are valid. BasicConstraintsValid bool IsCA bool // MaxPathLen and MaxPathLenZero indicate the presence and // value of the BasicConstraints' "pathLenConstraint". 
// // When parsing a certificate, a positive non-zero MaxPathLen // means that the field was specified, -1 means it was unset, // and MaxPathLenZero being true mean that the field was // explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false // should be treated equivalent to -1 (unset). // // When generating a certificate, an unset pathLenConstraint // can be requested with either MaxPathLen == -1 or using the // zero value for both MaxPathLen and MaxPathLenZero. MaxPathLen int // MaxPathLenZero indicates that BasicConstraintsValid==true // and MaxPathLen==0 should be interpreted as an actual // maximum path length of zero. Otherwise, that combination is // interpreted as MaxPathLen not being set. MaxPathLenZero bool SubjectKeyId []byte AuthorityKeyId []byte // RFC 5280, 4.2.2.1 (Authority Information Access) OCSPServer []string IssuingCertificateURL []string // Subject Alternate Name values. (Note that these values may not be valid // if invalid values were contained within a parsed certificate. For // example, an element of DNSNames may not be a valid DNS domain name.) DNSNames []string EmailAddresses []string IPAddresses []net.IP URIs []*url.URL // Name constraints PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical. PermittedDNSDomains []string ExcludedDNSDomains []string PermittedIPRanges []*net.IPNet ExcludedIPRanges []*net.IPNet PermittedEmailAddresses []string ExcludedEmailAddresses []string PermittedURIDomains []string ExcludedURIDomains []string // CRL Distribution Points CRLDistributionPoints []string // PolicyIdentifiers contains asn1.ObjectIdentifiers, the components // of which are limited to int32. If a certificate contains a policy which // cannot be represented by asn1.ObjectIdentifier, it will not be included in // PolicyIdentifiers, but will be present in Policies, which contains all parsed // policy OIDs. PolicyIdentifiers []asn1.ObjectIdentifier // Policies contains all policy identifiers included in the certificate. // In Go 1.22, encoding/gob cannot handle and ignores this field. Policies []OID } // ErrUnsupportedAlgorithm results from attempting to perform an operation that // involves algorithms that are not currently implemented. var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented") // An InsecureAlgorithmError indicates that the [SignatureAlgorithm] used to // generate the signature is not secure, and the signature has been rejected. // // To temporarily restore support for SHA-1 signatures, include the value // "x509sha1=1" in the GODEBUG environment variable. Note that this option will // be removed in a future release. type InsecureAlgorithmError SignatureAlgorithm func (e InsecureAlgorithmError) Error() string { var override string if SignatureAlgorithm(e) == SHA1WithRSA || SignatureAlgorithm(e) == ECDSAWithSHA1 { override = " (temporarily override with GODEBUG=x509sha1=1)" } return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e)) + override } // ConstraintViolationError results when a requested usage is not permitted by // a certificate. For example: checking a signature when the public key isn't a // certificate signing key. 
type ConstraintViolationError struct{} func (ConstraintViolationError) Error() string { return "x509: invalid signature: parent certificate cannot sign this kind of certificate" } func (c *Certificate) Equal(other *Certificate) bool { if c == nil || other == nil { return c == other } return bytes.Equal(c.Raw, other.Raw) } func (c *Certificate) hasSANExtension() bool { return oidInExtensions(oidExtensionSubjectAltName, c.Extensions) } // CheckSignatureFrom verifies that the signature on c is a valid signature from parent. // // This is a low-level API that performs very limited checks, and not a full // path verifier. Most users should use [Certificate.Verify] instead. func (c *Certificate) CheckSignatureFrom(parent *Certificate) error { // RFC 5280, 4.2.1.9: // "If the basic constraints extension is not present in a version 3 // certificate, or the extension is present but the cA boolean is not // asserted, then the certified public key MUST NOT be used to verify // certificate signatures." if parent.Version == 3 && !parent.BasicConstraintsValid || parent.BasicConstraintsValid && !parent.IsCA { return ConstraintViolationError{} } if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 { return ConstraintViolationError{} } if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm { return ErrUnsupportedAlgorithm } return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature, parent.PublicKey, false) } // CheckSignature verifies that signature is a valid signature over signed from // c's public key. // // This is a low-level API that performs no validity checks on the certificate. // // [MD5WithRSA] signatures are rejected, while [SHA1WithRSA] and [ECDSAWithSHA1] // signatures are currently accepted. func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error { return checkSignature(algo, signed, signature, c.PublicKey, true) } func (c *Certificate) hasNameConstraints() bool { return oidInExtensions(oidExtensionNameConstraints, c.Extensions) } func (c *Certificate) getSANExtension() []byte { for _, e := range c.Extensions { if e.Id.Equal(oidExtensionSubjectAltName) { return e.Value } } return nil } func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey any) error { return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey) } var x509sha1 = godebug.New("x509sha1") // checkSignature verifies that signature is a valid signature over signed from // a crypto.PublicKey. func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey, allowSHA1 bool) (err error) { var hashType crypto.Hash var pubKeyAlgo PublicKeyAlgorithm for _, details := range signatureAlgorithmDetails { if details.algo == algo { hashType = details.hash pubKeyAlgo = details.pubKeyAlgo break } } switch hashType { case crypto.Hash(0): if pubKeyAlgo != Ed25519 { return ErrUnsupportedAlgorithm } case crypto.MD5: return InsecureAlgorithmError(algo) case crypto.SHA1: // SHA-1 signatures are mostly disabled. See go.dev/issue/41682. 
if !allowSHA1 { if x509sha1.Value() != "1" { return InsecureAlgorithmError(algo) } x509sha1.IncNonDefault() } fallthrough default: if !hashType.Available() { return ErrUnsupportedAlgorithm } h := hashType.New() h.Write(signed) signed = h.Sum(nil) } switch pub := publicKey.(type) { case *rsa.PublicKey: if pubKeyAlgo != RSA { return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) } if algo.isRSAPSS() { return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}) } else { return rsa.VerifyPKCS1v15(pub, hashType, signed, signature) } case *ecdsa.PublicKey: if pubKeyAlgo != ECDSA { return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) } if !ecdsa.VerifyASN1(pub, signed, signature) { return errors.New("x509: ECDSA verification failure") } return case ed25519.PublicKey: if pubKeyAlgo != Ed25519 { return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) } if !ed25519.Verify(pub, signed, signature) { return errors.New("x509: Ed25519 verification failure") } return } return ErrUnsupportedAlgorithm } // CheckCRLSignature checks that the signature in crl is from c. // // Deprecated: Use [RevocationList.CheckSignatureFrom] instead. func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error { algo := getSignatureAlgorithmFromAI(crl.SignatureAlgorithm) return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) } type UnhandledCriticalExtension struct{} func (h UnhandledCriticalExtension) Error() string { return "x509: unhandled critical extension" } type basicConstraints struct { IsCA bool `asn1:"optional"` MaxPathLen int `asn1:"optional,default:-1"` } // RFC 5280 4.2.1.4 type policyInformation struct { Policy asn1.ObjectIdentifier // policyQualifiers omitted } const ( nameTypeEmail = 1 nameTypeDNS = 2 nameTypeURI = 6 nameTypeIP = 7 ) // RFC 5280, 4.2.2.1 type authorityInfoAccess struct { Method asn1.ObjectIdentifier Location asn1.RawValue } // RFC 5280, 4.2.1.14 type distributionPoint struct { DistributionPoint distributionPointName `asn1:"optional,tag:0"` Reason asn1.BitString `asn1:"optional,tag:1"` CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` } type distributionPointName struct { FullName []asn1.RawValue `asn1:"optional,tag:0"` RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` } func reverseBitsInAByte(in byte) byte { b1 := in>>4 | in<<4 b2 := b1>>2&0x33 | b1<<2&0xcc b3 := b2>>1&0x55 | b2<<1&0xaa return b3 } // asn1BitLength returns the bit-length of bitString by considering the // most-significant bit in a byte to be the "first" bit. This convention // matches ASN.1, but differs from almost everything else. 
func asn1BitLength(bitString []byte) int { bitLen := len(bitString) * 8 for i := range bitString { b := bitString[len(bitString)-i-1] for bit := uint(0); bit < 8; bit++ { if (b>>bit)&1 == 1 { return bitLen } bitLen-- } } return 0 } var ( oidExtensionSubjectKeyId = []int{2, 5, 29, 14} oidExtensionKeyUsage = []int{2, 5, 29, 15} oidExtensionExtendedKeyUsage = []int{2, 5, 29, 37} oidExtensionAuthorityKeyId = []int{2, 5, 29, 35} oidExtensionBasicConstraints = []int{2, 5, 29, 19} oidExtensionSubjectAltName = []int{2, 5, 29, 17} oidExtensionCertificatePolicies = []int{2, 5, 29, 32} oidExtensionNameConstraints = []int{2, 5, 29, 30} oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31} oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} oidExtensionCRLNumber = []int{2, 5, 29, 20} oidExtensionReasonCode = []int{2, 5, 29, 21} ) var ( oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} ) // oidInExtensions reports whether an extension with the given oid exists in // extensions. func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { for _, e := range extensions { if e.Id.Equal(oid) { return true } } return false } // marshalSANs marshals a list of addresses into a the contents of an X.509 // SubjectAlternativeName extension. func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) { var rawValues []asn1.RawValue for _, name := range dnsNames { if err := isIA5String(name); err != nil { return nil, err } rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) } for _, email := range emailAddresses { if err := isIA5String(email); err != nil { return nil, err } rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) } for _, rawIP := range ipAddresses { // If possible, we always want to encode IPv4 addresses in 4 bytes. ip := rawIP.To4() if ip == nil { ip = rawIP } rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) } for _, uri := range uris { uriStr := uri.String() if err := isIA5String(uriStr); err != nil { return nil, err } rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uriStr)}) } return asn1.Marshal(rawValues) } func isIA5String(s string) error { for _, r := range s { // Per RFC5280 "IA5String is limited to the set of ASCII characters" if r > unicode.MaxASCII { return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) } } return nil } var x509usepolicies = godebug.New("x509usepolicies") func buildCertExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte, subjectKeyId []byte) (ret []pkix.Extension, err error) { ret = make([]pkix.Extension, 10 /* maximum number of elements. 
*/) n := 0 if template.KeyUsage != 0 && !oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) { ret[n], err = marshalKeyUsage(template.KeyUsage) if err != nil { return nil, err } n++ } if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) && !oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) { ret[n], err = marshalExtKeyUsage(template.ExtKeyUsage, template.UnknownExtKeyUsage) if err != nil { return nil, err } n++ } if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) { ret[n], err = marshalBasicConstraints(template.IsCA, template.MaxPathLen, template.MaxPathLenZero) if err != nil { return nil, err } n++ } if len(subjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) { ret[n].Id = oidExtensionSubjectKeyId ret[n].Value, err = asn1.Marshal(subjectKeyId) if err != nil { return } n++ } if len(authorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) { ret[n].Id = oidExtensionAuthorityKeyId ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId}) if err != nil { return } n++ } if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) && !oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) { ret[n].Id = oidExtensionAuthorityInfoAccess var aiaValues []authorityInfoAccess for _, name := range template.OCSPServer { aiaValues = append(aiaValues, authorityInfoAccess{ Method: oidAuthorityInfoAccessOcsp, Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)}, }) } for _, name := range template.IssuingCertificateURL { aiaValues = append(aiaValues, authorityInfoAccess{ Method: oidAuthorityInfoAccessIssuers, Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)}, }) } ret[n].Value, err = asn1.Marshal(aiaValues) if err != nil { return } n++ } if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) { ret[n].Id = oidExtensionSubjectAltName // From RFC 5280, Section 4.2.1.6: // “If the subject field contains an empty sequence ... then // subjectAltName extension ... is marked as critical” ret[n].Critical = subjectIsEmpty ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) if err != nil { return } n++ } usePolicies := x509usepolicies.Value() == "1" if ((!usePolicies && len(template.PolicyIdentifiers) > 0) || (usePolicies && len(template.Policies) > 0)) && !oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) { ret[n], err = marshalCertificatePolicies(template.Policies, template.PolicyIdentifiers) if err != nil { return nil, err } n++ } if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 || len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 || len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 || len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) && !oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) { ret[n].Id = oidExtensionNameConstraints ret[n].Critical = template.PermittedDNSDomainsCritical ipAndMask := func(ipNet *net.IPNet) []byte { maskedIP := ipNet.IP.Mask(ipNet.Mask) ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask)) ipAndMask = append(ipAndMask, maskedIP...) 
ipAndMask = append(ipAndMask, ipNet.Mask...) return ipAndMask } serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) { var b cryptobyte.Builder for _, name := range dns { if err = isIA5String(name); err != nil { return nil, err } b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) { b.AddBytes([]byte(name)) }) }) } for _, ipNet := range ips { b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) { b.AddBytes(ipAndMask(ipNet)) }) }) } for _, email := range emails { if err = isIA5String(email); err != nil { return nil, err } b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) { b.AddBytes([]byte(email)) }) }) } for _, uriDomain := range uriDomains { if err = isIA5String(uriDomain); err != nil { return nil, err } b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) { b.AddBytes([]byte(uriDomain)) }) }) } return b.Bytes() } permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains) if err != nil { return nil, err } excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains) if err != nil { return nil, err } var b cryptobyte.Builder b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { if len(permitted) > 0 { b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { b.AddBytes(permitted) }) } if len(excluded) > 0 { b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { b.AddBytes(excluded) }) } }) ret[n].Value, err = b.Bytes() if err != nil { return nil, err } n++ } if len(template.CRLDistributionPoints) > 0 && !oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) { ret[n].Id = oidExtensionCRLDistributionPoints var crlDp []distributionPoint for _, name := range template.CRLDistributionPoints { dp := distributionPoint{ DistributionPoint: distributionPointName{ FullName: []asn1.RawValue{ {Tag: 6, Class: 2, Bytes: []byte(name)}, }, }, } crlDp = append(crlDp, dp) } ret[n].Value, err = asn1.Marshal(crlDp) if err != nil { return } n++ } // Adding another extension here? Remember to update the maximum number // of elements in the make() at the top of the function and the list of // template fields used in CreateCertificate documentation. 
return append(ret[:n], template.ExtraExtensions...), nil } func marshalKeyUsage(ku KeyUsage) (pkix.Extension, error) { ext := pkix.Extension{Id: oidExtensionKeyUsage, Critical: true} var a [2]byte a[0] = reverseBitsInAByte(byte(ku)) a[1] = reverseBitsInAByte(byte(ku >> 8)) l := 1 if a[1] != 0 { l = 2 } bitString := a[:l] var err error ext.Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)}) return ext, err } func marshalExtKeyUsage(extUsages []ExtKeyUsage, unknownUsages []asn1.ObjectIdentifier) (pkix.Extension, error) { ext := pkix.Extension{Id: oidExtensionExtendedKeyUsage} oids := make([]asn1.ObjectIdentifier, len(extUsages)+len(unknownUsages)) for i, u := range extUsages { if oid, ok := oidFromExtKeyUsage(u); ok { oids[i] = oid } else { return ext, errors.New("x509: unknown extended key usage") } } copy(oids[len(extUsages):], unknownUsages) var err error ext.Value, err = asn1.Marshal(oids) return ext, err } func marshalBasicConstraints(isCA bool, maxPathLen int, maxPathLenZero bool) (pkix.Extension, error) { ext := pkix.Extension{Id: oidExtensionBasicConstraints, Critical: true} // Leaving MaxPathLen as zero indicates that no maximum path // length is desired, unless MaxPathLenZero is set. A value of // -1 causes encoding/asn1 to omit the value as desired. if maxPathLen == 0 && !maxPathLenZero { maxPathLen = -1 } var err error ext.Value, err = asn1.Marshal(basicConstraints{isCA, maxPathLen}) return ext, err } func marshalCertificatePolicies(policies []OID, policyIdentifiers []asn1.ObjectIdentifier) (pkix.Extension, error) { ext := pkix.Extension{Id: oidExtensionCertificatePolicies} b := cryptobyte.NewBuilder(make([]byte, 0, 128)) b.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) { if x509usepolicies.Value() == "1" { x509usepolicies.IncNonDefault() for _, v := range policies { child.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) { child.AddASN1(cryptobyte_asn1.OBJECT_IDENTIFIER, func(child *cryptobyte.Builder) { if len(v.der) == 0 { child.SetError(errors.New("invalid policy object identifier")) return } child.AddBytes(v.der) }) }) } } else { for _, v := range policyIdentifiers { child.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) { child.AddASN1ObjectIdentifier(v) }) } } }) var err error ext.Value, err = b.Bytes() return ext, err } func buildCSRExtensions(template *CertificateRequest) ([]pkix.Extension, error) { var ret []pkix.Extension if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) { sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) if err != nil { return nil, err } ret = append(ret, pkix.Extension{ Id: oidExtensionSubjectAltName, Value: sanBytes, }) } return append(ret, template.ExtraExtensions...), nil } func subjectBytes(cert *Certificate) ([]byte, error) { if len(cert.RawSubject) > 0 { return cert.RawSubject, nil } return asn1.Marshal(cert.Subject.ToRDNSequence()) } // signingParamsForKey returns the signature algorithm and its Algorithm // Identifier to use for signing, based on the key type. If sigAlgo is not zero // then it overrides the default. 
func signingParamsForKey(key crypto.Signer, sigAlgo SignatureAlgorithm) (SignatureAlgorithm, pkix.AlgorithmIdentifier, error) { var ai pkix.AlgorithmIdentifier var pubType PublicKeyAlgorithm var defaultAlgo SignatureAlgorithm switch pub := key.Public().(type) { case *rsa.PublicKey: pubType = RSA defaultAlgo = SHA256WithRSA case *ecdsa.PublicKey: pubType = ECDSA switch pub.Curve { case elliptic.P224(), elliptic.P256(): defaultAlgo = ECDSAWithSHA256 case elliptic.P384(): defaultAlgo = ECDSAWithSHA384 case elliptic.P521(): defaultAlgo = ECDSAWithSHA512 default: return 0, ai, errors.New("x509: unsupported elliptic curve") } case ed25519.PublicKey: pubType = Ed25519 defaultAlgo = PureEd25519 default: return 0, ai, errors.New("x509: only RSA, ECDSA and Ed25519 keys supported") } if sigAlgo == 0 { sigAlgo = defaultAlgo } for _, details := range signatureAlgorithmDetails { if details.algo == sigAlgo { if details.pubKeyAlgo != pubType { return 0, ai, errors.New("x509: requested SignatureAlgorithm does not match private key type") } if details.hash == crypto.MD5 { return 0, ai, errors.New("x509: signing with MD5 is not supported") } return sigAlgo, pkix.AlgorithmIdentifier{ Algorithm: details.oid, Parameters: details.params, }, nil } } return 0, ai, errors.New("x509: unknown SignatureAlgorithm") } func signTBS(tbs []byte, key crypto.Signer, sigAlg SignatureAlgorithm, rand io.Reader) ([]byte, error) { signed := tbs hashFunc := sigAlg.hashFunc() if hashFunc != 0 { h := hashFunc.New() h.Write(signed) signed = h.Sum(nil) } var signerOpts crypto.SignerOpts = hashFunc if sigAlg.isRSAPSS() { signerOpts = &rsa.PSSOptions{ SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: hashFunc, } } signature, err := key.Sign(rand, signed, signerOpts) if err != nil { return nil, err } // Check the signature to ensure the crypto.Signer behaved correctly. if err := checkSignature(sigAlg, tbs, signature, key.Public(), true); err != nil { return nil, fmt.Errorf("x509: signature returned by signer is invalid: %w", err) } return signature, nil } // emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is // just an empty SEQUENCE. var emptyASN1Subject = []byte{0x30, 0} // CreateCertificate creates a new X.509 v3 certificate based on a template. // The following members of template are currently used: // // - AuthorityKeyId // - BasicConstraintsValid // - CRLDistributionPoints // - DNSNames // - EmailAddresses // - ExcludedDNSDomains // - ExcludedEmailAddresses // - ExcludedIPRanges // - ExcludedURIDomains // - ExtKeyUsage // - ExtraExtensions // - IPAddresses // - IsCA // - IssuingCertificateURL // - KeyUsage // - MaxPathLen // - MaxPathLenZero // - NotAfter // - NotBefore // - OCSPServer // - PermittedDNSDomains // - PermittedDNSDomainsCritical // - PermittedEmailAddresses // - PermittedIPRanges // - PermittedURIDomains // - PolicyIdentifiers (see note below) // - Policies (see note below) // - SerialNumber // - SignatureAlgorithm // - Subject // - SubjectKeyId // - URIs // - UnknownExtKeyUsage // // The certificate is signed by parent. If parent is equal to template then the // certificate is self-signed. The parameter pub is the public key of the // certificate to be generated and priv is the private key of the signer. // // The returned slice is the certificate in DER encoding. // // The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and // ed25519.PublicKey. pub must be a supported key type, and priv must be a // crypto.Signer with a supported public key. 
// // The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any, // unless the resulting certificate is self-signed. Otherwise the value from // template will be used. // // If SubjectKeyId from template is empty and the template is a CA, SubjectKeyId // will be generated from the hash of the public key. // // The PolicyIdentifier and Policies fields are both used to marshal certificate // policy OIDs. By default, only the PolicyIdentifier is marshaled, but if the // GODEBUG setting "x509usepolicies" has the value "1", the Policies field will // be marshaled instead of the PolicyIdentifier field. The Policies field can // be used to marshal policy OIDs which have components that are larger than 31 // bits. func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv any) ([]byte, error) { key, ok := priv.(crypto.Signer) if !ok { return nil, errors.New("x509: certificate private key does not implement crypto.Signer") } if template.SerialNumber == nil { return nil, errors.New("x509: no SerialNumber given") } // RFC 5280 Section 4.1.2.2: serial number must positive // // We _should_ also restrict serials to <= 20 octets, but it turns out a lot of people // get this wrong, in part because the encoding can itself alter the length of the // serial. For now we accept these non-conformant serials. if template.SerialNumber.Sign() == -1 { return nil, errors.New("x509: serial number must be positive") } if template.BasicConstraintsValid && !template.IsCA && template.MaxPathLen != -1 && (template.MaxPathLen != 0 || template.MaxPathLenZero) { return nil, errors.New("x509: only CAs are allowed to specify MaxPathLen") } signatureAlgorithm, algorithmIdentifier, err := signingParamsForKey(key, template.SignatureAlgorithm) if err != nil { return nil, err } publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub) if err != nil { return nil, err } if getPublicKeyAlgorithmFromOID(publicKeyAlgorithm.Algorithm) == UnknownPublicKeyAlgorithm { return nil, fmt.Errorf("x509: unsupported public key type: %T", pub) } asn1Issuer, err := subjectBytes(parent) if err != nil { return nil, err } asn1Subject, err := subjectBytes(template) if err != nil { return nil, err } authorityKeyId := template.AuthorityKeyId if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 { authorityKeyId = parent.SubjectKeyId } subjectKeyId := template.SubjectKeyId if len(subjectKeyId) == 0 && template.IsCA { // SubjectKeyId generated using method 1 in RFC 5280, Section 4.2.1.2: // (1) The keyIdentifier is composed of the 160-bit SHA-1 hash of the // value of the BIT STRING subjectPublicKey (excluding the tag, // length, and number of unused bits). h := sha1.Sum(publicKeyBytes) subjectKeyId = h[:] } // Check that the signer's public key matches the private key, if available. 
type privateKey interface { Equal(crypto.PublicKey) bool } if privPub, ok := key.Public().(privateKey); !ok { return nil, errors.New("x509: internal error: supported public key does not implement Equal") } else if parent.PublicKey != nil && !privPub.Equal(parent.PublicKey) { return nil, errors.New("x509: provided PrivateKey doesn't match parent's PublicKey") } extensions, err := buildCertExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId, subjectKeyId) if err != nil { return nil, err } encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes} c := tbsCertificate{ Version: 2, SerialNumber: template.SerialNumber, SignatureAlgorithm: algorithmIdentifier, Issuer: asn1.RawValue{FullBytes: asn1Issuer}, Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()}, Subject: asn1.RawValue{FullBytes: asn1Subject}, PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey}, Extensions: extensions, } tbsCertContents, err := asn1.Marshal(c) if err != nil { return nil, err } c.Raw = tbsCertContents signature, err := signTBS(tbsCertContents, key, signatureAlgorithm, rand) if err != nil { return nil, err } return asn1.Marshal(certificate{ TBSCertificate: c, SignatureAlgorithm: algorithmIdentifier, SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, }) } // pemCRLPrefix is the magic string that indicates that we have a PEM encoded // CRL. var pemCRLPrefix = []byte("-----BEGIN X509 CRL") // pemType is the type of a PEM encoded CRL. var pemType = "X509 CRL" // ParseCRL parses a CRL from the given bytes. It's often the case that PEM // encoded CRLs will appear where they should be DER encoded, so this function // will transparently handle PEM encoding as long as there isn't any leading // garbage. // // Deprecated: Use [ParseRevocationList] instead. func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) { if bytes.HasPrefix(crlBytes, pemCRLPrefix) { block, _ := pem.Decode(crlBytes) if block != nil && block.Type == pemType { crlBytes = block.Bytes } } return ParseDERCRL(crlBytes) } // ParseDERCRL parses a DER encoded CRL from the given bytes. // // Deprecated: Use [ParseRevocationList] instead. func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) { certList := new(pkix.CertificateList) if rest, err := asn1.Unmarshal(derBytes, certList); err != nil { return nil, err } else if len(rest) != 0 { return nil, errors.New("x509: trailing data after CRL") } return certList, nil } // CreateCRL returns a DER encoded CRL, signed by this Certificate, that // contains the given list of revoked certificates. // // Deprecated: this method does not generate an RFC 5280 conformant X.509 v2 CRL. // To generate a standards compliant CRL, use [CreateRevocationList] instead. func (c *Certificate) CreateCRL(rand io.Reader, priv any, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) { key, ok := priv.(crypto.Signer) if !ok { return nil, errors.New("x509: certificate private key does not implement crypto.Signer") } signatureAlgorithm, algorithmIdentifier, err := signingParamsForKey(key, 0) if err != nil { return nil, err } // Force revocation times to UTC per RFC 5280. 
revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts)) for i, rc := range revokedCerts { rc.RevocationTime = rc.RevocationTime.UTC() revokedCertsUTC[i] = rc } tbsCertList := pkix.TBSCertificateList{ Version: 1, Signature: algorithmIdentifier, Issuer: c.Subject.ToRDNSequence(), ThisUpdate: now.UTC(), NextUpdate: expiry.UTC(), RevokedCertificates: revokedCertsUTC, } // Authority Key Id if len(c.SubjectKeyId) > 0 { var aki pkix.Extension aki.Id = oidExtensionAuthorityKeyId aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId}) if err != nil { return nil, err } tbsCertList.Extensions = append(tbsCertList.Extensions, aki) } tbsCertListContents, err := asn1.Marshal(tbsCertList) if err != nil { return nil, err } tbsCertList.Raw = tbsCertListContents signature, err := signTBS(tbsCertListContents, key, signatureAlgorithm, rand) if err != nil { return nil, err } return asn1.Marshal(pkix.CertificateList{ TBSCertList: tbsCertList, SignatureAlgorithm: algorithmIdentifier, SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, }) } // CertificateRequest represents a PKCS #10, certificate signature request. type CertificateRequest struct { Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature). RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content. RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. RawSubject []byte // DER encoded Subject. Version int Signature []byte SignatureAlgorithm SignatureAlgorithm PublicKeyAlgorithm PublicKeyAlgorithm PublicKey any Subject pkix.Name // Attributes contains the CSR attributes that can parse as // pkix.AttributeTypeAndValueSET. // // Deprecated: Use Extensions and ExtraExtensions instead for parsing and // generating the requestedExtensions attribute. Attributes []pkix.AttributeTypeAndValueSET // Extensions contains all requested extensions, in raw form. When parsing // CSRs, this can be used to extract extensions that are not parsed by this // package. Extensions []pkix.Extension // ExtraExtensions contains extensions to be copied, raw, into any CSR // marshaled by CreateCertificateRequest. Values override any extensions // that would otherwise be produced based on the other fields but are // overridden by any extensions specified in Attributes. // // The ExtraExtensions field is not populated by ParseCertificateRequest, // see Extensions instead. ExtraExtensions []pkix.Extension // Subject Alternate Name values. DNSNames []string EmailAddresses []string IPAddresses []net.IP URIs []*url.URL } // These structures reflect the ASN.1 structure of X.509 certificate // signature requests (see RFC 2986): type tbsCertificateRequest struct { Raw asn1.RawContent Version int Subject asn1.RawValue PublicKey publicKeyInfo RawAttributes []asn1.RawValue `asn1:"tag:0"` } type certificateRequest struct { Raw asn1.RawContent TBSCSR tbsCertificateRequest SignatureAlgorithm pkix.AlgorithmIdentifier SignatureValue asn1.BitString } // oidExtensionRequest is a PKCS #9 OBJECT IDENTIFIER that indicates requested // extensions in a CSR. var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14} // newRawAttributes converts AttributeTypeAndValueSETs from a template // CertificateRequest's Attributes into tbsCertificateRequest RawAttributes. 
func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) { var rawAttributes []asn1.RawValue b, err := asn1.Marshal(attributes) if err != nil { return nil, err } rest, err := asn1.Unmarshal(b, &rawAttributes) if err != nil { return nil, err } if len(rest) != 0 { return nil, errors.New("x509: failed to unmarshal raw CSR Attributes") } return rawAttributes, nil } // parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs. func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET { var attributes []pkix.AttributeTypeAndValueSET for _, rawAttr := range rawAttributes { var attr pkix.AttributeTypeAndValueSET rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr) // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET // (i.e.: challengePassword or unstructuredName). if err == nil && len(rest) == 0 { attributes = append(attributes, attr) } } return attributes } // parseCSRExtensions parses the attributes from a CSR and extracts any // requested extensions. func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) { // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1. type pkcs10Attribute struct { Id asn1.ObjectIdentifier Values []asn1.RawValue `asn1:"set"` } var ret []pkix.Extension requestedExts := make(map[string]bool) for _, rawAttr := range rawAttributes { var attr pkcs10Attribute if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 { // Ignore attributes that don't parse. continue } if !attr.Id.Equal(oidExtensionRequest) { continue } var extensions []pkix.Extension if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil { return nil, err } for _, ext := range extensions { oidStr := ext.Id.String() if requestedExts[oidStr] { return nil, errors.New("x509: certificate request contains duplicate requested extensions") } requestedExts[oidStr] = true } ret = append(ret, extensions...) } return ret, nil } // CreateCertificateRequest creates a new certificate request based on a // template. The following members of template are used: // // - SignatureAlgorithm // - Subject // - DNSNames // - EmailAddresses // - IPAddresses // - URIs // - ExtraExtensions // - Attributes (deprecated) // // priv is the private key to sign the CSR with, and the corresponding public // key will be included in the CSR. It must implement crypto.Signer and its // Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey or a // ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or // ed25519.PrivateKey satisfies this.) // // The returned slice is the certificate request in DER encoding. func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error) { key, ok := priv.(crypto.Signer) if !ok { return nil, errors.New("x509: certificate private key does not implement crypto.Signer") } signatureAlgorithm, algorithmIdentifier, err := signingParamsForKey(key, template.SignatureAlgorithm) if err != nil { return nil, err } var publicKeyBytes []byte var publicKeyAlgorithm pkix.AlgorithmIdentifier publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public()) if err != nil { return nil, err } extensions, err := buildCSRExtensions(template) if err != nil { return nil, err } // Make a copy of template.Attributes because we may alter it below. 
attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes)) for _, attr := range template.Attributes { values := make([][]pkix.AttributeTypeAndValue, len(attr.Value)) copy(values, attr.Value) attributes = append(attributes, pkix.AttributeTypeAndValueSET{ Type: attr.Type, Value: values, }) } extensionsAppended := false if len(extensions) > 0 { // Append the extensions to an existing attribute if possible. for _, atvSet := range attributes { if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 { continue } // specifiedExtensions contains all the extensions that we // found specified via template.Attributes. specifiedExtensions := make(map[string]bool) for _, atvs := range atvSet.Value { for _, atv := range atvs { specifiedExtensions[atv.Type.String()] = true } } newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions)) newValue = append(newValue, atvSet.Value[0]...) for _, e := range extensions { if specifiedExtensions[e.Id.String()] { // Attributes already contained a value for // this extension and it takes priority. continue } newValue = append(newValue, pkix.AttributeTypeAndValue{ // There is no place for the critical // flag in an AttributeTypeAndValue. Type: e.Id, Value: e.Value, }) } atvSet.Value[0] = newValue extensionsAppended = true break } } rawAttributes, err := newRawAttributes(attributes) if err != nil { return nil, err } // If not included in attributes, add a new attribute for the // extensions. if len(extensions) > 0 && !extensionsAppended { attr := struct { Type asn1.ObjectIdentifier Value [][]pkix.Extension `asn1:"set"` }{ Type: oidExtensionRequest, Value: [][]pkix.Extension{extensions}, } b, err := asn1.Marshal(attr) if err != nil { return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error()) } var rawValue asn1.RawValue if _, err := asn1.Unmarshal(b, &rawValue); err != nil { return nil, err } rawAttributes = append(rawAttributes, rawValue) } asn1Subject := template.RawSubject if len(asn1Subject) == 0 { asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence()) if err != nil { return nil, err } } tbsCSR := tbsCertificateRequest{ Version: 0, // PKCS #10, RFC 2986 Subject: asn1.RawValue{FullBytes: asn1Subject}, PublicKey: publicKeyInfo{ Algorithm: publicKeyAlgorithm, PublicKey: asn1.BitString{ Bytes: publicKeyBytes, BitLength: len(publicKeyBytes) * 8, }, }, RawAttributes: rawAttributes, } tbsCSRContents, err := asn1.Marshal(tbsCSR) if err != nil { return nil, err } tbsCSR.Raw = tbsCSRContents signature, err := signTBS(tbsCSRContents, key, signatureAlgorithm, rand) if err != nil { return nil, err } return asn1.Marshal(certificateRequest{ TBSCSR: tbsCSR, SignatureAlgorithm: algorithmIdentifier, SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, }) } // ParseCertificateRequest parses a single certificate request from the // given ASN.1 DER data. 
func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) { var csr certificateRequest rest, err := asn1.Unmarshal(asn1Data, &csr) if err != nil { return nil, err } else if len(rest) != 0 { return nil, asn1.SyntaxError{Msg: "trailing data"} } return parseCertificateRequest(&csr) } func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) { out := &CertificateRequest{ Raw: in.Raw, RawTBSCertificateRequest: in.TBSCSR.Raw, RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw, RawSubject: in.TBSCSR.Subject.FullBytes, Signature: in.SignatureValue.RightAlign(), SignatureAlgorithm: getSignatureAlgorithmFromAI(in.SignatureAlgorithm), PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm), Version: in.TBSCSR.Version, Attributes: parseRawAttributes(in.TBSCSR.RawAttributes), } var err error if out.PublicKeyAlgorithm != UnknownPublicKeyAlgorithm { out.PublicKey, err = parsePublicKey(&in.TBSCSR.PublicKey) if err != nil { return nil, err } } var subject pkix.RDNSequence if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil { return nil, err } else if len(rest) != 0 { return nil, errors.New("x509: trailing data after X.509 Subject") } out.Subject.FillFromRDNSequence(&subject) if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil { return nil, err } for _, extension := range out.Extensions { switch { case extension.Id.Equal(oidExtensionSubjectAltName): out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value) if err != nil { return nil, err } } } return out, nil } // CheckSignature reports whether the signature on c is valid. func (c *CertificateRequest) CheckSignature() error { return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey, true) } // RevocationListEntry represents an entry in the revokedCertificates // sequence of a CRL. type RevocationListEntry struct { // Raw contains the raw bytes of the revokedCertificates entry. It is set when // parsing a CRL; it is ignored when generating a CRL. Raw []byte // SerialNumber represents the serial number of a revoked certificate. It is // both used when creating a CRL and populated when parsing a CRL. It must not // be nil. SerialNumber *big.Int // RevocationTime represents the time at which the certificate was revoked. It // is both used when creating a CRL and populated when parsing a CRL. It must // not be the zero time. RevocationTime time.Time // ReasonCode represents the reason for revocation, using the integer enum // values specified in RFC 5280 Section 5.3.1. When creating a CRL, the zero // value will result in the reasonCode extension being omitted. When parsing a // CRL, the zero value may represent either the reasonCode extension being // absent (which implies the default revocation reason of 0/Unspecified), or // it may represent the reasonCode extension being present and explicitly // containing a value of 0/Unspecified (which should not happen according to // the DER encoding rules, but can and does happen anyway). ReasonCode int // Extensions contains raw X.509 extensions. When parsing CRL entries, // this can be used to extract non-critical extensions that are not // parsed by this package. When marshaling CRL entries, the Extensions // field is ignored, see ExtraExtensions. Extensions []pkix.Extension // ExtraExtensions contains extensions to be copied, raw, into any // marshaled CRL entries. 
Values override any extensions that would // otherwise be produced based on the other fields. The ExtraExtensions // field is not populated when parsing CRL entries, see Extensions. ExtraExtensions []pkix.Extension } // RevocationList represents a [Certificate] Revocation List (CRL) as specified // by RFC 5280. type RevocationList struct { // Raw contains the complete ASN.1 DER content of the CRL (tbsCertList, // signatureAlgorithm, and signatureValue.) Raw []byte // RawTBSRevocationList contains just the tbsCertList portion of the ASN.1 // DER. RawTBSRevocationList []byte // RawIssuer contains the DER encoded Issuer. RawIssuer []byte // Issuer contains the DN of the issuing certificate. Issuer pkix.Name // AuthorityKeyId is used to identify the public key associated with the // issuing certificate. It is populated from the authorityKeyIdentifier // extension when parsing a CRL. It is ignored when creating a CRL; the // extension is populated from the issuing certificate itself. AuthorityKeyId []byte Signature []byte // SignatureAlgorithm is used to determine the signature algorithm to be // used when signing the CRL. If 0 the default algorithm for the signing // key will be used. SignatureAlgorithm SignatureAlgorithm // RevokedCertificateEntries represents the revokedCertificates sequence in // the CRL. It is used when creating a CRL and also populated when parsing a // CRL. When creating a CRL, it may be empty or nil, in which case the // revokedCertificates ASN.1 sequence will be omitted from the CRL entirely. RevokedCertificateEntries []RevocationListEntry // RevokedCertificates is used to populate the revokedCertificates // sequence in the CRL if RevokedCertificateEntries is empty. It may be empty // or nil, in which case an empty CRL will be created. // // Deprecated: Use RevokedCertificateEntries instead. RevokedCertificates []pkix.RevokedCertificate // Number is used to populate the X.509 v2 cRLNumber extension in the CRL, // which should be a monotonically increasing sequence number for a given // CRL scope and CRL issuer. It is also populated from the cRLNumber // extension when parsing a CRL. Number *big.Int // ThisUpdate is used to populate the thisUpdate field in the CRL, which // indicates the issuance date of the CRL. ThisUpdate time.Time // NextUpdate is used to populate the nextUpdate field in the CRL, which // indicates the date by which the next CRL will be issued. NextUpdate // must be greater than ThisUpdate. NextUpdate time.Time // Extensions contains raw X.509 extensions. When creating a CRL, // the Extensions field is ignored, see ExtraExtensions. Extensions []pkix.Extension // ExtraExtensions contains any additional extensions to add directly to // the CRL. ExtraExtensions []pkix.Extension } // These structures reflect the ASN.1 structure of X.509 CRLs better than // the existing crypto/x509/pkix variants do. These mirror the existing // certificate structs in this file. // // Notably, we include issuer as an asn1.RawValue, mirroring the behavior of // tbsCertificate and allowing raw (unparsed) subjects to be passed cleanly. 
type certificateList struct { TBSCertList tbsCertificateList SignatureAlgorithm pkix.AlgorithmIdentifier SignatureValue asn1.BitString } type tbsCertificateList struct { Raw asn1.RawContent Version int `asn1:"optional,default:0"` Signature pkix.AlgorithmIdentifier Issuer asn1.RawValue ThisUpdate time.Time NextUpdate time.Time `asn1:"optional"` RevokedCertificates []pkix.RevokedCertificate `asn1:"optional"` Extensions []pkix.Extension `asn1:"tag:0,optional,explicit"` } // CreateRevocationList creates a new X.509 v2 [Certificate] Revocation List, // according to RFC 5280, based on template. // // The CRL is signed by priv which should be the private key associated with // the public key in the issuer certificate. // // The issuer may not be nil, and the crlSign bit must be set in [KeyUsage] in // order to use it as a CRL issuer. // // The issuer distinguished name CRL field and authority key identifier // extension are populated using the issuer certificate. issuer must have // SubjectKeyId set. func CreateRevocationList(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error) { if template == nil { return nil, errors.New("x509: template can not be nil") } if issuer == nil { return nil, errors.New("x509: issuer can not be nil") } if (issuer.KeyUsage & KeyUsageCRLSign) == 0 { return nil, errors.New("x509: issuer must have the crlSign key usage bit set") } if len(issuer.SubjectKeyId) == 0 { return nil, errors.New("x509: issuer certificate doesn't contain a subject key identifier") } if template.NextUpdate.Before(template.ThisUpdate) { return nil, errors.New("x509: template.ThisUpdate is after template.NextUpdate") } if template.Number == nil { return nil, errors.New("x509: template contains nil Number field") } signatureAlgorithm, algorithmIdentifier, err := signingParamsForKey(priv, template.SignatureAlgorithm) if err != nil { return nil, err } var revokedCerts []pkix.RevokedCertificate // Only process the deprecated RevokedCertificates field if it is populated // and the new RevokedCertificateEntries field is not populated. if len(template.RevokedCertificates) > 0 && len(template.RevokedCertificateEntries) == 0 { // Force revocation times to UTC per RFC 5280. revokedCerts = make([]pkix.RevokedCertificate, len(template.RevokedCertificates)) for i, rc := range template.RevokedCertificates { rc.RevocationTime = rc.RevocationTime.UTC() revokedCerts[i] = rc } } else { // Convert the ReasonCode field to a proper extension, and force revocation // times to UTC per RFC 5280. revokedCerts = make([]pkix.RevokedCertificate, len(template.RevokedCertificateEntries)) for i, rce := range template.RevokedCertificateEntries { if rce.SerialNumber == nil { return nil, errors.New("x509: template contains entry with nil SerialNumber field") } if rce.RevocationTime.IsZero() { return nil, errors.New("x509: template contains entry with zero RevocationTime field") } rc := pkix.RevokedCertificate{ SerialNumber: rce.SerialNumber, RevocationTime: rce.RevocationTime.UTC(), } // Copy over any extra extensions, except for a Reason Code extension, // because we'll synthesize that ourselves to ensure it is correct. 
exts := make([]pkix.Extension, 0, len(rce.ExtraExtensions)) for _, ext := range rce.ExtraExtensions { if ext.Id.Equal(oidExtensionReasonCode) { return nil, errors.New("x509: template contains entry with ReasonCode ExtraExtension; use ReasonCode field instead") } exts = append(exts, ext) } // Only add a reasonCode extension if the reason is non-zero, as per // RFC 5280 Section 5.3.1. if rce.ReasonCode != 0 { reasonBytes, err := asn1.Marshal(asn1.Enumerated(rce.ReasonCode)) if err != nil { return nil, err } exts = append(exts, pkix.Extension{ Id: oidExtensionReasonCode, Value: reasonBytes, }) } if len(exts) > 0 { rc.Extensions = exts } revokedCerts[i] = rc } } aki, err := asn1.Marshal(authKeyId{Id: issuer.SubjectKeyId}) if err != nil { return nil, err } if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) { return nil, errors.New("x509: CRL number exceeds 20 octets") } crlNum, err := asn1.Marshal(template.Number) if err != nil { return nil, err } // Correctly use the issuer's subject sequence if one is specified. issuerSubject, err := subjectBytes(issuer) if err != nil { return nil, err } tbsCertList := tbsCertificateList{ Version: 1, // v2 Signature: algorithmIdentifier, Issuer: asn1.RawValue{FullBytes: issuerSubject}, ThisUpdate: template.ThisUpdate.UTC(), NextUpdate: template.NextUpdate.UTC(), Extensions: []pkix.Extension{ { Id: oidExtensionAuthorityKeyId, Value: aki, }, { Id: oidExtensionCRLNumber, Value: crlNum, }, }, } if len(revokedCerts) > 0 { tbsCertList.RevokedCertificates = revokedCerts } if len(template.ExtraExtensions) > 0 { tbsCertList.Extensions = append(tbsCertList.Extensions, template.ExtraExtensions...) } tbsCertListContents, err := asn1.Marshal(tbsCertList) if err != nil { return nil, err } // Optimization to only marshal this struct once, when signing and // then embedding in certificateList below. tbsCertList.Raw = tbsCertListContents signature, err := signTBS(tbsCertListContents, priv, signatureAlgorithm, rand) if err != nil { return nil, err } return asn1.Marshal(certificateList{ TBSCertList: tbsCertList, SignatureAlgorithm: algorithmIdentifier, SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, }) } // CheckSignatureFrom verifies that the signature on rl is a valid signature // from issuer. func (rl *RevocationList) CheckSignatureFrom(parent *Certificate) error { if parent.Version == 3 && !parent.BasicConstraintsValid || parent.BasicConstraintsValid && !parent.IsCA { return ConstraintViolationError{} } if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCRLSign == 0 { return ConstraintViolationError{} } if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm { return ErrUnsupportedAlgorithm } return parent.CheckSignature(rl.SignatureAlgorithm, rl.RawTBSRevocationList, rl.Signature) }
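// Illustrative usage sketch (an editorial addition, not part of x509.go): a
// minimal self-signed certificate built with the CreateCertificate API
// documented above. The package layout, subject name, validity window, and
// key-usage choices below are assumptions made for demonstration only.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Any crypto.Signer works; a P-256 ECDSA key keeps the example short.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1), // must be positive; production code should use a random serial
		Subject:               pkix.Name{CommonName: "example self-signed"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	// Passing tmpl as both template and parent makes the certificate self-signed.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})))
}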
go/src/crypto/x509/x509.go/0
{ "file_path": "go/src/crypto/x509/x509.go", "repo_id": "go", "token_count": 30845 }
229
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package buildinfo provides access to information embedded in a Go binary // about how it was built. This includes the Go toolchain version, and the // set of modules used (for binaries built in module mode). // // Build information is available for the currently running binary in // runtime/debug.ReadBuildInfo. package buildinfo import ( "bytes" "debug/elf" "debug/macho" "debug/pe" "debug/plan9obj" "encoding/binary" "errors" "fmt" "internal/saferio" "internal/xcoff" "io" "io/fs" "os" "runtime/debug" _ "unsafe" // for linkname ) // Type alias for build info. We cannot move the types here, since // runtime/debug would need to import this package, which would make it // a much larger dependency. type BuildInfo = debug.BuildInfo // errUnrecognizedFormat is returned when a given executable file doesn't // appear to be in a known format, or it breaks the rules of that format, // or when there are I/O errors reading the file. var errUnrecognizedFormat = errors.New("unrecognized file format") // errNotGoExe is returned when a given executable file is valid but does // not contain Go build information. // // errNotGoExe should be an internal detail, // but widely used packages access it using linkname. // Notable members of the hall of shame include: // - github.com/quay/claircore // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname errNotGoExe var errNotGoExe = errors.New("not a Go executable") // The build info blob left by the linker is identified by // a 16-byte header, consisting of buildInfoMagic (14 bytes), // the binary's pointer size (1 byte), // and whether the binary is big endian (1 byte). var buildInfoMagic = []byte("\xff Go buildinf:") // ReadFile returns build information embedded in a Go binary // file at the given path. Most information is only available for binaries built // with module support. func ReadFile(name string) (info *BuildInfo, err error) { defer func() { if pathErr := (*fs.PathError)(nil); errors.As(err, &pathErr) { err = fmt.Errorf("could not read Go build info: %w", err) } else if err != nil { err = fmt.Errorf("could not read Go build info from %s: %w", name, err) } }() f, err := os.Open(name) if err != nil { return nil, err } defer f.Close() return Read(f) } // Read returns build information embedded in a Go binary file // accessed through the given ReaderAt. Most information is only available for // binaries built with module support. func Read(r io.ReaderAt) (*BuildInfo, error) { vers, mod, err := readRawBuildInfo(r) if err != nil { return nil, err } bi, err := debug.ParseBuildInfo(mod) if err != nil { return nil, err } bi.GoVersion = vers return bi, nil } type exe interface { // ReadData reads and returns up to size bytes starting at virtual address addr. ReadData(addr, size uint64) ([]byte, error) // DataStart returns the virtual address and size of the segment or section that // should contain build information. This is either a specially named section // or the first writable non-zero data segment. DataStart() (uint64, uint64) } // readRawBuildInfo extracts the Go toolchain version and module information // strings from a Go binary. On success, vers should be non-empty. mod // is empty if the binary was not built with modules enabled. 
func readRawBuildInfo(r io.ReaderAt) (vers, mod string, err error) { // Read the first bytes of the file to identify the format, then delegate to // a format-specific function to load segment and section headers. ident := make([]byte, 16) if n, err := r.ReadAt(ident, 0); n < len(ident) || err != nil { return "", "", errUnrecognizedFormat } var x exe switch { case bytes.HasPrefix(ident, []byte("\x7FELF")): f, err := elf.NewFile(r) if err != nil { return "", "", errUnrecognizedFormat } x = &elfExe{f} case bytes.HasPrefix(ident, []byte("MZ")): f, err := pe.NewFile(r) if err != nil { return "", "", errUnrecognizedFormat } x = &peExe{f} case bytes.HasPrefix(ident, []byte("\xFE\xED\xFA")) || bytes.HasPrefix(ident[1:], []byte("\xFA\xED\xFE")): f, err := macho.NewFile(r) if err != nil { return "", "", errUnrecognizedFormat } x = &machoExe{f} case bytes.HasPrefix(ident, []byte("\xCA\xFE\xBA\xBE")) || bytes.HasPrefix(ident, []byte("\xCA\xFE\xBA\xBF")): f, err := macho.NewFatFile(r) if err != nil || len(f.Arches) == 0 { return "", "", errUnrecognizedFormat } x = &machoExe{f.Arches[0].File} case bytes.HasPrefix(ident, []byte{0x01, 0xDF}) || bytes.HasPrefix(ident, []byte{0x01, 0xF7}): f, err := xcoff.NewFile(r) if err != nil { return "", "", errUnrecognizedFormat } x = &xcoffExe{f} case hasPlan9Magic(ident): f, err := plan9obj.NewFile(r) if err != nil { return "", "", errUnrecognizedFormat } x = &plan9objExe{f} default: return "", "", errUnrecognizedFormat } // Read segment or section to find the build info blob. // On some platforms, the blob will be in its own section, and DataStart // returns the address of that section. On others, it's somewhere in the // data segment; the linker puts it near the beginning. // See cmd/link/internal/ld.Link.buildinfo. dataAddr, dataSize := x.DataStart() if dataSize == 0 { return "", "", errNotGoExe } data, err := x.ReadData(dataAddr, dataSize) if err != nil { return "", "", err } const ( buildInfoAlign = 16 buildInfoSize = 32 ) for { i := bytes.Index(data, buildInfoMagic) if i < 0 || len(data)-i < buildInfoSize { return "", "", errNotGoExe } if i%buildInfoAlign == 0 && len(data)-i >= buildInfoSize { data = data[i:] break } data = data[(i+buildInfoAlign-1)&^(buildInfoAlign-1):] } // Decode the blob. // The first 14 bytes are buildInfoMagic. // The next two bytes indicate pointer size in bytes (4 or 8) and endianness // (0 for little, 1 for big). // Two virtual addresses to Go strings follow that: runtime.buildVersion, // and runtime.modinfo. // On 32-bit platforms, the last 8 bytes are unused. // If the endianness has the 2 bit set, then the pointers are zero // and the 32-byte header is followed by varint-prefixed string data // for the two string values we care about. ptrSize := int(data[14]) if data[15]&2 != 0 { vers, data = decodeString(data[32:]) mod, data = decodeString(data) } else { bigEndian := data[15] != 0 var bo binary.ByteOrder if bigEndian { bo = binary.BigEndian } else { bo = binary.LittleEndian } var readPtr func([]byte) uint64 if ptrSize == 4 { readPtr = func(b []byte) uint64 { return uint64(bo.Uint32(b)) } } else if ptrSize == 8 { readPtr = bo.Uint64 } else { return "", "", errNotGoExe } vers = readString(x, ptrSize, readPtr, readPtr(data[16:])) mod = readString(x, ptrSize, readPtr, readPtr(data[16+ptrSize:])) } if vers == "" { return "", "", errNotGoExe } if len(mod) >= 33 && mod[len(mod)-17] == '\n' { // Strip module framing: sentinel strings delimiting the module info. // These are cmd/go/internal/modload.infoStart and infoEnd. 
mod = mod[16 : len(mod)-16] } else { mod = "" } return vers, mod, nil } func hasPlan9Magic(magic []byte) bool { if len(magic) >= 4 { m := binary.BigEndian.Uint32(magic) switch m { case plan9obj.Magic386, plan9obj.MagicAMD64, plan9obj.MagicARM: return true } } return false } func decodeString(data []byte) (s string, rest []byte) { u, n := binary.Uvarint(data) if n <= 0 || u > uint64(len(data)-n) { return "", nil } return string(data[n : uint64(n)+u]), data[uint64(n)+u:] } // readString returns the string at address addr in the executable x. func readString(x exe, ptrSize int, readPtr func([]byte) uint64, addr uint64) string { hdr, err := x.ReadData(addr, uint64(2*ptrSize)) if err != nil || len(hdr) < 2*ptrSize { return "" } dataAddr := readPtr(hdr) dataLen := readPtr(hdr[ptrSize:]) data, err := x.ReadData(dataAddr, dataLen) if err != nil || uint64(len(data)) < dataLen { return "" } return string(data) } // elfExe is the ELF implementation of the exe interface. type elfExe struct { f *elf.File } func (x *elfExe) ReadData(addr, size uint64) ([]byte, error) { for _, prog := range x.f.Progs { if prog.Vaddr <= addr && addr <= prog.Vaddr+prog.Filesz-1 { n := prog.Vaddr + prog.Filesz - addr if n > size { n = size } return saferio.ReadDataAt(prog, n, int64(addr-prog.Vaddr)) } } return nil, errUnrecognizedFormat } func (x *elfExe) DataStart() (uint64, uint64) { for _, s := range x.f.Sections { if s.Name == ".go.buildinfo" { return s.Addr, s.Size } } for _, p := range x.f.Progs { if p.Type == elf.PT_LOAD && p.Flags&(elf.PF_X|elf.PF_W) == elf.PF_W { return p.Vaddr, p.Memsz } } return 0, 0 } // peExe is the PE (Windows Portable Executable) implementation of the exe interface. type peExe struct { f *pe.File } func (x *peExe) imageBase() uint64 { switch oh := x.f.OptionalHeader.(type) { case *pe.OptionalHeader32: return uint64(oh.ImageBase) case *pe.OptionalHeader64: return oh.ImageBase } return 0 } func (x *peExe) ReadData(addr, size uint64) ([]byte, error) { addr -= x.imageBase() for _, sect := range x.f.Sections { if uint64(sect.VirtualAddress) <= addr && addr <= uint64(sect.VirtualAddress+sect.Size-1) { n := uint64(sect.VirtualAddress+sect.Size) - addr if n > size { n = size } return saferio.ReadDataAt(sect, n, int64(addr-uint64(sect.VirtualAddress))) } } return nil, errUnrecognizedFormat } func (x *peExe) DataStart() (uint64, uint64) { // Assume data is first writable section. const ( IMAGE_SCN_CNT_CODE = 0x00000020 IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 IMAGE_SCN_MEM_EXECUTE = 0x20000000 IMAGE_SCN_MEM_READ = 0x40000000 IMAGE_SCN_MEM_WRITE = 0x80000000 IMAGE_SCN_MEM_DISCARDABLE = 0x2000000 IMAGE_SCN_LNK_NRELOC_OVFL = 0x1000000 IMAGE_SCN_ALIGN_32BYTES = 0x600000 ) for _, sect := range x.f.Sections { if sect.VirtualAddress != 0 && sect.Size != 0 && sect.Characteristics&^IMAGE_SCN_ALIGN_32BYTES == IMAGE_SCN_CNT_INITIALIZED_DATA|IMAGE_SCN_MEM_READ|IMAGE_SCN_MEM_WRITE { return uint64(sect.VirtualAddress) + x.imageBase(), uint64(sect.VirtualSize) } } return 0, 0 } // machoExe is the Mach-O (Apple macOS/iOS) implementation of the exe interface. 
type machoExe struct { f *macho.File } func (x *machoExe) ReadData(addr, size uint64) ([]byte, error) { for _, load := range x.f.Loads { seg, ok := load.(*macho.Segment) if !ok { continue } if seg.Addr <= addr && addr <= seg.Addr+seg.Filesz-1 { if seg.Name == "__PAGEZERO" { continue } n := seg.Addr + seg.Filesz - addr if n > size { n = size } return saferio.ReadDataAt(seg, n, int64(addr-seg.Addr)) } } return nil, errUnrecognizedFormat } func (x *machoExe) DataStart() (uint64, uint64) { // Look for section named "__go_buildinfo". for _, sec := range x.f.Sections { if sec.Name == "__go_buildinfo" { return sec.Addr, sec.Size } } // Try the first non-empty writable segment. const RW = 3 for _, load := range x.f.Loads { seg, ok := load.(*macho.Segment) if ok && seg.Addr != 0 && seg.Filesz != 0 && seg.Prot == RW && seg.Maxprot == RW { return seg.Addr, seg.Memsz } } return 0, 0 } // xcoffExe is the XCOFF (AIX eXtended COFF) implementation of the exe interface. type xcoffExe struct { f *xcoff.File } func (x *xcoffExe) ReadData(addr, size uint64) ([]byte, error) { for _, sect := range x.f.Sections { if sect.VirtualAddress <= addr && addr <= sect.VirtualAddress+sect.Size-1 { n := sect.VirtualAddress + sect.Size - addr if n > size { n = size } return saferio.ReadDataAt(sect, n, int64(addr-sect.VirtualAddress)) } } return nil, errors.New("address not mapped") } func (x *xcoffExe) DataStart() (uint64, uint64) { if s := x.f.SectionByType(xcoff.STYP_DATA); s != nil { return s.VirtualAddress, s.Size } return 0, 0 } // plan9objExe is the Plan 9 a.out implementation of the exe interface. type plan9objExe struct { f *plan9obj.File } func (x *plan9objExe) DataStart() (uint64, uint64) { if s := x.f.Section("data"); s != nil { return uint64(s.Offset), uint64(s.Size) } return 0, 0 } func (x *plan9objExe) ReadData(addr, size uint64) ([]byte, error) { for _, sect := range x.f.Sections { if uint64(sect.Offset) <= addr && addr <= uint64(sect.Offset+sect.Size-1) { n := uint64(sect.Offset+sect.Size) - addr if n > size { n = size } return saferio.ReadDataAt(sect, n, int64(addr-uint64(sect.Offset))) } } return nil, errors.New("address not mapped") }
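// Illustrative usage sketch (an editorial addition, not part of buildinfo.go):
// printing the toolchain version and module list embedded in a compiled Go
// binary via ReadFile. The binary path used here is a placeholder assumption.
package main

import (
	"debug/buildinfo"
	"fmt"
	"log"
)

func main() {
	info, err := buildinfo.ReadFile("./someprogram") // hypothetical path to a Go-built binary
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("go version:", info.GoVersion)
	fmt.Println("main module:", info.Main.Path, info.Main.Version)
	for _, dep := range info.Deps {
		fmt.Println("dependency:", dep.Path, dep.Version)
	}
}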
go/src/debug/buildinfo/buildinfo.go/0
{ "file_path": "go/src/debug/buildinfo/buildinfo.go", "repo_id": "go", "token_count": 5023 }
230
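A minimal sketch of how the debug/buildinfo reader excerpted above is typically consumed: read the embedded build information from a compiled Go binary and print the toolchain version and module graph. The command-line handling and output format are illustrative assumptions, not part of the package.

// buildinfo_dump.go - illustrative sketch; assumes the target Go binary path
// is passed as the first command-line argument.
package main

import (
	"debug/buildinfo"
	"fmt"
	"log"
	"os"
)

func main() {
	// ReadFile understands ELF, PE, Mach-O, XCOFF, and Plan 9 a.out binaries.
	info, err := buildinfo.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("go version:", info.GoVersion)
	fmt.Println("main module:", info.Main.Path, info.Main.Version)
	for _, dep := range info.Deps {
		fmt.Println("dep:", dep.Path, dep.Version)
	}
}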
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// cppunsuptypes.elf built with g++ 7.3
//    g++ -g -c -o cppunsuptypes.elf cppunsuptypes.cc

int i = 3;
double d = 3;

// anonymous reference type
int &culprit = i;

// named reference type
typedef double &dref;
dref dr = d;

// incorporated into another type
typedef struct {
  dref q;
  int &r;
} hasrefs;

hasrefs hr = { d, i };

// This code is intended to trigger a DWARF "pointer to member" type DIE
struct CS { int dm; };

int foo()
{
  int CS::* pdm = &CS::dm;
  CS cs = {42};

  return cs.*pdm;
}
go/src/debug/dwarf/testdata/cppunsuptypes.cc/0
{ "file_path": "go/src/debug/dwarf/testdata/cppunsuptypes.cc", "repo_id": "go", "token_count": 249 }
231
// clang -gdwarf-5 -O2 -nostdlib

__attribute__((noinline, cold))
static int sum(int i) {
  int j, s;

  s = 0;
  for (j = 0; j < i; j++) {
    s += j * i;
  }
  return s;
}

int main(int argc, char** argv) {
  if (argc == 0) {
    return 0;
  }
  return sum(argc);
}
go/src/debug/dwarf/testdata/rnglistx.c/0
{ "file_path": "go/src/debug/dwarf/testdata/rnglistx.c", "repo_id": "go", "token_count": 131 }
232
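The two C fixtures above exist only to produce objects with particular DWARF constructs for the debug/dwarf tests. As rough orientation, a sketch of how such an object can be inspected from Go by combining debug/elf with debug/dwarf; the input path is an illustrative assumption.

// dwarf_walk.go - illustrative sketch; assumes an ELF object containing DWARF
// data (such as a build of the fixtures above) is passed as the first argument.
package main

import (
	"debug/dwarf"
	"debug/elf"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	d, err := f.DWARF()
	if err != nil {
		log.Fatal(err)
	}
	r := d.Reader()
	for {
		e, err := r.Next()
		if err != nil {
			log.Fatal(err)
		}
		if e == nil {
			break // end of DWARF data
		}
		if name, ok := e.Val(dwarf.AttrName).(string); ok {
			fmt.Println(e.Tag, name)
		}
	}
}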
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package elf implements access to ELF object files. # Security This package is not designed to be hardened against adversarial inputs, and is outside the scope of https://go.dev/security/policy. In particular, only basic validation is done when parsing object files. As such, care should be taken when parsing untrusted inputs, as parsing malformed files may consume significant resources, or cause panics. */ package elf import ( "bytes" "compress/zlib" "debug/dwarf" "encoding/binary" "errors" "fmt" "internal/saferio" "internal/zstd" "io" "os" "strings" "unsafe" ) // TODO: error reporting detail /* * Internal ELF representation */ // A FileHeader represents an ELF file header. type FileHeader struct { Class Class Data Data Version Version OSABI OSABI ABIVersion uint8 ByteOrder binary.ByteOrder Type Type Machine Machine Entry uint64 } // A File represents an open ELF file. type File struct { FileHeader Sections []*Section Progs []*Prog closer io.Closer gnuNeed []verneed gnuVersym []byte } // A SectionHeader represents a single ELF section header. type SectionHeader struct { Name string Type SectionType Flags SectionFlag Addr uint64 Offset uint64 Size uint64 Link uint32 Info uint32 Addralign uint64 Entsize uint64 // FileSize is the size of this section in the file in bytes. // If a section is compressed, FileSize is the size of the // compressed data, while Size (above) is the size of the // uncompressed data. FileSize uint64 } // A Section represents a single section in an ELF file. type Section struct { SectionHeader // Embed ReaderAt for ReadAt method. // Do not embed SectionReader directly // to avoid having Read and Seek. // If a client wants Read and Seek it must use // Open() to avoid fighting over the seek offset // with other clients. // // ReaderAt may be nil if the section is not easily available // in a random-access form. For example, a compressed section // may have a nil ReaderAt. io.ReaderAt sr *io.SectionReader compressionType CompressionType compressionOffset int64 } // Data reads and returns the contents of the ELF section. // Even if the section is stored compressed in the ELF file, // Data returns uncompressed data. // // For an [SHT_NOBITS] section, Data always returns a non-nil error. func (s *Section) Data() ([]byte, error) { return saferio.ReadData(s.Open(), s.Size) } // stringTable reads and returns the string table given by the // specified link value. func (f *File) stringTable(link uint32) ([]byte, error) { if link <= 0 || link >= uint32(len(f.Sections)) { return nil, errors.New("section has invalid string table link") } return f.Sections[link].Data() } // Open returns a new ReadSeeker reading the ELF section. // Even if the section is stored compressed in the ELF file, // the ReadSeeker reads uncompressed data. // // For an [SHT_NOBITS] section, all calls to the opened reader // will return a non-nil error. 
func (s *Section) Open() io.ReadSeeker { if s.Type == SHT_NOBITS { return io.NewSectionReader(&nobitsSectionReader{}, 0, int64(s.Size)) } var zrd func(io.Reader) (io.ReadCloser, error) if s.Flags&SHF_COMPRESSED == 0 { if !strings.HasPrefix(s.Name, ".zdebug") { return io.NewSectionReader(s.sr, 0, 1<<63-1) } b := make([]byte, 12) n, _ := s.sr.ReadAt(b, 0) if n != 12 || string(b[:4]) != "ZLIB" { return io.NewSectionReader(s.sr, 0, 1<<63-1) } s.compressionOffset = 12 s.compressionType = COMPRESS_ZLIB s.Size = binary.BigEndian.Uint64(b[4:12]) zrd = zlib.NewReader } else if s.Flags&SHF_ALLOC != 0 { return errorReader{&FormatError{int64(s.Offset), "SHF_COMPRESSED applies only to non-allocable sections", s.compressionType}} } switch s.compressionType { case COMPRESS_ZLIB: zrd = zlib.NewReader case COMPRESS_ZSTD: zrd = func(r io.Reader) (io.ReadCloser, error) { return io.NopCloser(zstd.NewReader(r)), nil } } if zrd == nil { return errorReader{&FormatError{int64(s.Offset), "unknown compression type", s.compressionType}} } return &readSeekerFromReader{ reset: func() (io.Reader, error) { fr := io.NewSectionReader(s.sr, s.compressionOffset, int64(s.FileSize)-s.compressionOffset) return zrd(fr) }, size: int64(s.Size), } } // A ProgHeader represents a single ELF program header. type ProgHeader struct { Type ProgType Flags ProgFlag Off uint64 Vaddr uint64 Paddr uint64 Filesz uint64 Memsz uint64 Align uint64 } // A Prog represents a single ELF program header in an ELF binary. type Prog struct { ProgHeader // Embed ReaderAt for ReadAt method. // Do not embed SectionReader directly // to avoid having Read and Seek. // If a client wants Read and Seek it must use // Open() to avoid fighting over the seek offset // with other clients. io.ReaderAt sr *io.SectionReader } // Open returns a new ReadSeeker reading the ELF program body. func (p *Prog) Open() io.ReadSeeker { return io.NewSectionReader(p.sr, 0, 1<<63-1) } // A Symbol represents an entry in an ELF symbol table section. type Symbol struct { Name string Info, Other byte Section SectionIndex Value, Size uint64 // Version and Library are present only for the dynamic symbol // table. Version string Library string } /* * ELF reader */ type FormatError struct { off int64 msg string val any } func (e *FormatError) Error() string { msg := e.msg if e.val != nil { msg += fmt.Sprintf(" '%v' ", e.val) } msg += fmt.Sprintf("in record at byte %#x", e.off) return msg } // Open opens the named file using [os.Open] and prepares it for use as an ELF binary. func Open(name string) (*File, error) { f, err := os.Open(name) if err != nil { return nil, err } ff, err := NewFile(f) if err != nil { f.Close() return nil, err } ff.closer = f return ff, nil } // Close closes the [File]. // If the [File] was created using [NewFile] directly instead of [Open], // Close has no effect. func (f *File) Close() error { var err error if f.closer != nil { err = f.closer.Close() f.closer = nil } return err } // SectionByType returns the first section in f with the // given type, or nil if there is no such section. func (f *File) SectionByType(typ SectionType) *Section { for _, s := range f.Sections { if s.Type == typ { return s } } return nil } // NewFile creates a new [File] for accessing an ELF binary in an underlying reader. // The ELF binary is expected to start at position 0 in the ReaderAt. 
func NewFile(r io.ReaderAt) (*File, error) { sr := io.NewSectionReader(r, 0, 1<<63-1) // Read and decode ELF identifier var ident [16]uint8 if _, err := r.ReadAt(ident[0:], 0); err != nil { return nil, err } if ident[0] != '\x7f' || ident[1] != 'E' || ident[2] != 'L' || ident[3] != 'F' { return nil, &FormatError{0, "bad magic number", ident[0:4]} } f := new(File) f.Class = Class(ident[EI_CLASS]) switch f.Class { case ELFCLASS32: case ELFCLASS64: // ok default: return nil, &FormatError{0, "unknown ELF class", f.Class} } f.Data = Data(ident[EI_DATA]) var bo binary.ByteOrder switch f.Data { case ELFDATA2LSB: bo = binary.LittleEndian case ELFDATA2MSB: bo = binary.BigEndian default: return nil, &FormatError{0, "unknown ELF data encoding", f.Data} } f.ByteOrder = bo f.Version = Version(ident[EI_VERSION]) if f.Version != EV_CURRENT { return nil, &FormatError{0, "unknown ELF version", f.Version} } f.OSABI = OSABI(ident[EI_OSABI]) f.ABIVersion = ident[EI_ABIVERSION] // Read ELF file header var phoff int64 var phentsize, phnum int var shoff int64 var shentsize, shnum, shstrndx int switch f.Class { case ELFCLASS32: var hdr Header32 data := make([]byte, unsafe.Sizeof(hdr)) if _, err := sr.ReadAt(data, 0); err != nil { return nil, err } f.Type = Type(bo.Uint16(data[unsafe.Offsetof(hdr.Type):])) f.Machine = Machine(bo.Uint16(data[unsafe.Offsetof(hdr.Machine):])) f.Entry = uint64(bo.Uint32(data[unsafe.Offsetof(hdr.Entry):])) if v := Version(bo.Uint32(data[unsafe.Offsetof(hdr.Version):])); v != f.Version { return nil, &FormatError{0, "mismatched ELF version", v} } phoff = int64(bo.Uint32(data[unsafe.Offsetof(hdr.Phoff):])) phentsize = int(bo.Uint16(data[unsafe.Offsetof(hdr.Phentsize):])) phnum = int(bo.Uint16(data[unsafe.Offsetof(hdr.Phnum):])) shoff = int64(bo.Uint32(data[unsafe.Offsetof(hdr.Shoff):])) shentsize = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shentsize):])) shnum = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shnum):])) shstrndx = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shstrndx):])) case ELFCLASS64: var hdr Header64 data := make([]byte, unsafe.Sizeof(hdr)) if _, err := sr.ReadAt(data, 0); err != nil { return nil, err } f.Type = Type(bo.Uint16(data[unsafe.Offsetof(hdr.Type):])) f.Machine = Machine(bo.Uint16(data[unsafe.Offsetof(hdr.Machine):])) f.Entry = bo.Uint64(data[unsafe.Offsetof(hdr.Entry):]) if v := Version(bo.Uint32(data[unsafe.Offsetof(hdr.Version):])); v != f.Version { return nil, &FormatError{0, "mismatched ELF version", v} } phoff = int64(bo.Uint64(data[unsafe.Offsetof(hdr.Phoff):])) phentsize = int(bo.Uint16(data[unsafe.Offsetof(hdr.Phentsize):])) phnum = int(bo.Uint16(data[unsafe.Offsetof(hdr.Phnum):])) shoff = int64(bo.Uint64(data[unsafe.Offsetof(hdr.Shoff):])) shentsize = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shentsize):])) shnum = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shnum):])) shstrndx = int(bo.Uint16(data[unsafe.Offsetof(hdr.Shstrndx):])) } if shoff < 0 { return nil, &FormatError{0, "invalid shoff", shoff} } if phoff < 0 { return nil, &FormatError{0, "invalid phoff", phoff} } if shoff == 0 && shnum != 0 { return nil, &FormatError{0, "invalid ELF shnum for shoff=0", shnum} } if shnum > 0 && shstrndx >= shnum { return nil, &FormatError{0, "invalid ELF shstrndx", shstrndx} } var wantPhentsize, wantShentsize int switch f.Class { case ELFCLASS32: wantPhentsize = 8 * 4 wantShentsize = 10 * 4 case ELFCLASS64: wantPhentsize = 2*4 + 6*8 wantShentsize = 4*4 + 6*8 } if phnum > 0 && phentsize < wantPhentsize { return nil, &FormatError{0, "invalid ELF phentsize", phentsize} } // Read program 
headers f.Progs = make([]*Prog, phnum) phdata, err := saferio.ReadDataAt(sr, uint64(phnum)*uint64(phentsize), phoff) if err != nil { return nil, err } for i := 0; i < phnum; i++ { off := uintptr(i) * uintptr(phentsize) p := new(Prog) switch f.Class { case ELFCLASS32: var ph Prog32 p.ProgHeader = ProgHeader{ Type: ProgType(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Type):])), Flags: ProgFlag(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Flags):])), Off: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Off):])), Vaddr: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Vaddr):])), Paddr: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Paddr):])), Filesz: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Filesz):])), Memsz: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Memsz):])), Align: uint64(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Align):])), } case ELFCLASS64: var ph Prog64 p.ProgHeader = ProgHeader{ Type: ProgType(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Type):])), Flags: ProgFlag(bo.Uint32(phdata[off+unsafe.Offsetof(ph.Flags):])), Off: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Off):]), Vaddr: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Vaddr):]), Paddr: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Paddr):]), Filesz: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Filesz):]), Memsz: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Memsz):]), Align: bo.Uint64(phdata[off+unsafe.Offsetof(ph.Align):]), } } if int64(p.Off) < 0 { return nil, &FormatError{phoff + int64(off), "invalid program header offset", p.Off} } if int64(p.Filesz) < 0 { return nil, &FormatError{phoff + int64(off), "invalid program header file size", p.Filesz} } p.sr = io.NewSectionReader(r, int64(p.Off), int64(p.Filesz)) p.ReaderAt = p.sr f.Progs[i] = p } // If the number of sections is greater than or equal to SHN_LORESERVE // (0xff00), shnum has the value zero and the actual number of section // header table entries is contained in the sh_size field of the section // header at index 0. if shoff > 0 && shnum == 0 { var typ, link uint32 sr.Seek(shoff, io.SeekStart) switch f.Class { case ELFCLASS32: sh := new(Section32) if err := binary.Read(sr, bo, sh); err != nil { return nil, err } shnum = int(sh.Size) typ = sh.Type link = sh.Link case ELFCLASS64: sh := new(Section64) if err := binary.Read(sr, bo, sh); err != nil { return nil, err } shnum = int(sh.Size) typ = sh.Type link = sh.Link } if SectionType(typ) != SHT_NULL { return nil, &FormatError{shoff, "invalid type of the initial section", SectionType(typ)} } if shnum < int(SHN_LORESERVE) { return nil, &FormatError{shoff, "invalid ELF shnum contained in sh_size", shnum} } // If the section name string table section index is greater than or // equal to SHN_LORESERVE (0xff00), this member has the value // SHN_XINDEX (0xffff) and the actual index of the section name // string table section is contained in the sh_link field of the // section header at index 0. 
if shstrndx == int(SHN_XINDEX) { shstrndx = int(link) if shstrndx < int(SHN_LORESERVE) { return nil, &FormatError{shoff, "invalid ELF shstrndx contained in sh_link", shstrndx} } } } if shnum > 0 && shentsize < wantShentsize { return nil, &FormatError{0, "invalid ELF shentsize", shentsize} } // Read section headers c := saferio.SliceCap[Section](uint64(shnum)) if c < 0 { return nil, &FormatError{0, "too many sections", shnum} } f.Sections = make([]*Section, 0, c) names := make([]uint32, 0, c) shdata, err := saferio.ReadDataAt(sr, uint64(shnum)*uint64(shentsize), shoff) if err != nil { return nil, err } for i := 0; i < shnum; i++ { off := uintptr(i) * uintptr(shentsize) s := new(Section) switch f.Class { case ELFCLASS32: var sh Section32 names = append(names, bo.Uint32(shdata[off+unsafe.Offsetof(sh.Name):])) s.SectionHeader = SectionHeader{ Type: SectionType(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Type):])), Flags: SectionFlag(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Flags):])), Addr: uint64(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Addr):])), Offset: uint64(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Off):])), FileSize: uint64(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Size):])), Link: bo.Uint32(shdata[off+unsafe.Offsetof(sh.Link):]), Info: bo.Uint32(shdata[off+unsafe.Offsetof(sh.Info):]), Addralign: uint64(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Addralign):])), Entsize: uint64(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Entsize):])), } case ELFCLASS64: var sh Section64 names = append(names, bo.Uint32(shdata[off+unsafe.Offsetof(sh.Name):])) s.SectionHeader = SectionHeader{ Type: SectionType(bo.Uint32(shdata[off+unsafe.Offsetof(sh.Type):])), Flags: SectionFlag(bo.Uint64(shdata[off+unsafe.Offsetof(sh.Flags):])), Offset: bo.Uint64(shdata[off+unsafe.Offsetof(sh.Off):]), FileSize: bo.Uint64(shdata[off+unsafe.Offsetof(sh.Size):]), Addr: bo.Uint64(shdata[off+unsafe.Offsetof(sh.Addr):]), Link: bo.Uint32(shdata[off+unsafe.Offsetof(sh.Link):]), Info: bo.Uint32(shdata[off+unsafe.Offsetof(sh.Info):]), Addralign: bo.Uint64(shdata[off+unsafe.Offsetof(sh.Addralign):]), Entsize: bo.Uint64(shdata[off+unsafe.Offsetof(sh.Entsize):]), } } if int64(s.Offset) < 0 { return nil, &FormatError{shoff + int64(off), "invalid section offset", int64(s.Offset)} } if int64(s.FileSize) < 0 { return nil, &FormatError{shoff + int64(off), "invalid section size", int64(s.FileSize)} } s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.FileSize)) if s.Flags&SHF_COMPRESSED == 0 { s.ReaderAt = s.sr s.Size = s.FileSize } else { // Read the compression header. switch f.Class { case ELFCLASS32: var ch Chdr32 chdata := make([]byte, unsafe.Sizeof(ch)) if _, err := s.sr.ReadAt(chdata, 0); err != nil { return nil, err } s.compressionType = CompressionType(bo.Uint32(chdata[unsafe.Offsetof(ch.Type):])) s.Size = uint64(bo.Uint32(chdata[unsafe.Offsetof(ch.Size):])) s.Addralign = uint64(bo.Uint32(chdata[unsafe.Offsetof(ch.Addralign):])) s.compressionOffset = int64(unsafe.Sizeof(ch)) case ELFCLASS64: var ch Chdr64 chdata := make([]byte, unsafe.Sizeof(ch)) if _, err := s.sr.ReadAt(chdata, 0); err != nil { return nil, err } s.compressionType = CompressionType(bo.Uint32(chdata[unsafe.Offsetof(ch.Type):])) s.Size = bo.Uint64(chdata[unsafe.Offsetof(ch.Size):]) s.Addralign = bo.Uint64(chdata[unsafe.Offsetof(ch.Addralign):]) s.compressionOffset = int64(unsafe.Sizeof(ch)) } } f.Sections = append(f.Sections, s) } if len(f.Sections) == 0 { return f, nil } // Load section header string table. 
if shstrndx == 0 { // If the file has no section name string table, // shstrndx holds the value SHN_UNDEF (0). return f, nil } shstr := f.Sections[shstrndx] if shstr.Type != SHT_STRTAB { return nil, &FormatError{shoff + int64(shstrndx*shentsize), "invalid ELF section name string table type", shstr.Type} } shstrtab, err := shstr.Data() if err != nil { return nil, err } for i, s := range f.Sections { var ok bool s.Name, ok = getString(shstrtab, int(names[i])) if !ok { return nil, &FormatError{shoff + int64(i*shentsize), "bad section name index", names[i]} } } return f, nil } // getSymbols returns a slice of Symbols from parsing the symbol table // with the given type, along with the associated string table. func (f *File) getSymbols(typ SectionType) ([]Symbol, []byte, error) { switch f.Class { case ELFCLASS64: return f.getSymbols64(typ) case ELFCLASS32: return f.getSymbols32(typ) } return nil, nil, errors.New("not implemented") } // ErrNoSymbols is returned by [File.Symbols] and [File.DynamicSymbols] // if there is no such section in the File. var ErrNoSymbols = errors.New("no symbol section") func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, error) { symtabSection := f.SectionByType(typ) if symtabSection == nil { return nil, nil, ErrNoSymbols } data, err := symtabSection.Data() if err != nil { return nil, nil, fmt.Errorf("cannot load symbol section: %w", err) } if len(data) == 0 { return nil, nil, errors.New("symbol section is empty") } if len(data)%Sym32Size != 0 { return nil, nil, errors.New("length of symbol section is not a multiple of SymSize") } strdata, err := f.stringTable(symtabSection.Link) if err != nil { return nil, nil, fmt.Errorf("cannot load string table section: %w", err) } // The first entry is all zeros. data = data[Sym32Size:] symbols := make([]Symbol, len(data)/Sym32Size) i := 0 var sym Sym32 for len(data) > 0 { sym.Name = f.ByteOrder.Uint32(data[0:4]) sym.Value = f.ByteOrder.Uint32(data[4:8]) sym.Size = f.ByteOrder.Uint32(data[8:12]) sym.Info = data[12] sym.Other = data[13] sym.Shndx = f.ByteOrder.Uint16(data[14:16]) str, _ := getString(strdata, int(sym.Name)) symbols[i].Name = str symbols[i].Info = sym.Info symbols[i].Other = sym.Other symbols[i].Section = SectionIndex(sym.Shndx) symbols[i].Value = uint64(sym.Value) symbols[i].Size = uint64(sym.Size) i++ data = data[Sym32Size:] } return symbols, strdata, nil } func (f *File) getSymbols64(typ SectionType) ([]Symbol, []byte, error) { symtabSection := f.SectionByType(typ) if symtabSection == nil { return nil, nil, ErrNoSymbols } data, err := symtabSection.Data() if err != nil { return nil, nil, fmt.Errorf("cannot load symbol section: %w", err) } if len(data)%Sym64Size != 0 { return nil, nil, errors.New("length of symbol section is not a multiple of Sym64Size") } strdata, err := f.stringTable(symtabSection.Link) if err != nil { return nil, nil, fmt.Errorf("cannot load string table section: %w", err) } // The first entry is all zeros. 
data = data[Sym64Size:] symbols := make([]Symbol, len(data)/Sym64Size) i := 0 var sym Sym64 for len(data) > 0 { sym.Name = f.ByteOrder.Uint32(data[0:4]) sym.Info = data[4] sym.Other = data[5] sym.Shndx = f.ByteOrder.Uint16(data[6:8]) sym.Value = f.ByteOrder.Uint64(data[8:16]) sym.Size = f.ByteOrder.Uint64(data[16:24]) str, _ := getString(strdata, int(sym.Name)) symbols[i].Name = str symbols[i].Info = sym.Info symbols[i].Other = sym.Other symbols[i].Section = SectionIndex(sym.Shndx) symbols[i].Value = sym.Value symbols[i].Size = sym.Size i++ data = data[Sym64Size:] } return symbols, strdata, nil } // getString extracts a string from an ELF string table. func getString(section []byte, start int) (string, bool) { if start < 0 || start >= len(section) { return "", false } for end := start; end < len(section); end++ { if section[end] == 0 { return string(section[start:end]), true } } return "", false } // Section returns a section with the given name, or nil if no such // section exists. func (f *File) Section(name string) *Section { for _, s := range f.Sections { if s.Name == name { return s } } return nil } // applyRelocations applies relocations to dst. rels is a relocations section // in REL or RELA format. func (f *File) applyRelocations(dst []byte, rels []byte) error { switch { case f.Class == ELFCLASS64 && f.Machine == EM_X86_64: return f.applyRelocationsAMD64(dst, rels) case f.Class == ELFCLASS32 && f.Machine == EM_386: return f.applyRelocations386(dst, rels) case f.Class == ELFCLASS32 && f.Machine == EM_ARM: return f.applyRelocationsARM(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_AARCH64: return f.applyRelocationsARM64(dst, rels) case f.Class == ELFCLASS32 && f.Machine == EM_PPC: return f.applyRelocationsPPC(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_PPC64: return f.applyRelocationsPPC64(dst, rels) case f.Class == ELFCLASS32 && f.Machine == EM_MIPS: return f.applyRelocationsMIPS(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_MIPS: return f.applyRelocationsMIPS64(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_LOONGARCH: return f.applyRelocationsLOONG64(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_RISCV: return f.applyRelocationsRISCV64(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_S390: return f.applyRelocationss390x(dst, rels) case f.Class == ELFCLASS64 && f.Machine == EM_SPARCV9: return f.applyRelocationsSPARC64(dst, rels) default: return errors.New("applyRelocations: not implemented") } } // canApplyRelocation reports whether we should try to apply a // relocation to a DWARF data section, given a pointer to the symbol // targeted by the relocation. // Most relocations in DWARF data tend to be section-relative, but // some target non-section symbols (for example, low_PC attrs on // subprogram or compilation unit DIEs that target function symbols). func canApplyRelocation(sym *Symbol) bool { return sym.Section != SHN_UNDEF && sym.Section < SHN_LORESERVE } func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { // 24 is the size of Rela64. 
if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_X86_64(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } // There are relocations, so this must be a normal // object file. The code below handles only basic relocations // of the form S + A (symbol plus addend). switch t { case R_X86_64_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_X86_64_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocations386(dst []byte, rels []byte) error { // 8 is the size of Rel32. if len(rels)%8 != 0 { return errors.New("length of relocation section is not a multiple of 8") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rel Rel32 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rel) symNo := rel.Info >> 8 t := R_386(rel.Info & 0xff) if symNo == 0 || symNo > uint32(len(symbols)) { continue } sym := &symbols[symNo-1] if t == R_386_32 { if rel.Off+4 >= uint32(len(dst)) { continue } val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) val += uint32(sym.Value) f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) } } return nil } func (f *File) applyRelocationsARM(dst []byte, rels []byte) error { // 8 is the size of Rel32. if len(rels)%8 != 0 { return errors.New("length of relocation section is not a multiple of 8") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rel Rel32 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rel) symNo := rel.Info >> 8 t := R_ARM(rel.Info & 0xff) if symNo == 0 || symNo > uint32(len(symbols)) { continue } sym := &symbols[symNo-1] switch t { case R_ARM_ABS32: if rel.Off+4 >= uint32(len(dst)) { continue } val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) val += uint32(sym.Value) f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) } } return nil } func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_AARCH64(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } // There are relocations, so this must be a normal // object file. The code below handles only basic relocations // of the form S + A (symbol plus addend). 
switch t { case R_AARCH64_ABS64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_AARCH64_ABS32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error { // 12 is the size of Rela32. if len(rels)%12 != 0 { return errors.New("length of relocation section is not a multiple of 12") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela32 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 8 t := R_PPC(rela.Info & 0xff) if symNo == 0 || symNo > uint32(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_PPC_ADDR32: if rela.Off+4 >= uint32(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_PPC64(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_PPC64_ADDR64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_PPC64_ADDR32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsMIPS(dst []byte, rels []byte) error { // 8 is the size of Rel32. if len(rels)%8 != 0 { return errors.New("length of relocation section is not a multiple of 8") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rel Rel32 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rel) symNo := rel.Info >> 8 t := R_MIPS(rel.Info & 0xff) if symNo == 0 || symNo > uint32(len(symbols)) { continue } sym := &symbols[symNo-1] switch t { case R_MIPS_32: if rel.Off+4 >= uint32(len(dst)) { continue } val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) val += uint32(sym.Value) f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) } } return nil } func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { // 24 is the size of Rela64. 
if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) var symNo uint64 var t R_MIPS if f.ByteOrder == binary.BigEndian { symNo = rela.Info >> 32 t = R_MIPS(rela.Info & 0xff) } else { symNo = rela.Info & 0xffffffff t = R_MIPS(rela.Info >> 56) } if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_MIPS_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_MIPS_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsLOONG64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) var symNo uint64 var t R_LARCH symNo = rela.Info >> 32 t = R_LARCH(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_LARCH_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_LARCH_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_RISCV(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_RISCV_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_RISCV_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationss390x(dst []byte, rels []byte) error { // 24 is the size of Rela64. 
if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_390(rela.Info & 0xffff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_390_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_390_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) applyRelocationsSPARC64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { return errors.New("length of relocation section is not a multiple of 24") } symbols, _, err := f.getSymbols(SHT_SYMTAB) if err != nil { return err } b := bytes.NewReader(rels) var rela Rela64 for b.Len() > 0 { binary.Read(b, f.ByteOrder, &rela) symNo := rela.Info >> 32 t := R_SPARC(rela.Info & 0xff) if symNo == 0 || symNo > uint64(len(symbols)) { continue } sym := &symbols[symNo-1] if !canApplyRelocation(sym) { continue } switch t { case R_SPARC_64, R_SPARC_UA64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_SPARC_32, R_SPARC_UA32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } return nil } func (f *File) DWARF() (*dwarf.Data, error) { dwarfSuffix := func(s *Section) string { switch { case strings.HasPrefix(s.Name, ".debug_"): return s.Name[7:] case strings.HasPrefix(s.Name, ".zdebug_"): return s.Name[8:] default: return "" } } // sectionData gets the data for s, checks its size, and // applies any applicable relations. sectionData := func(i int, s *Section) ([]byte, error) { b, err := s.Data() if err != nil && uint64(len(b)) < s.Size { return nil, err } if f.Type == ET_EXEC { // Do not apply relocations to DWARF sections for ET_EXEC binaries. // Relocations should already be applied, and .rela sections may // contain incorrect data. return b, nil } for _, r := range f.Sections { if r.Type != SHT_RELA && r.Type != SHT_REL { continue } if int(r.Info) != i { continue } rd, err := r.Data() if err != nil { return nil, err } err = f.applyRelocations(b, rd) if err != nil { return nil, err } } return b, nil } // There are many DWARf sections, but these are the ones // the debug/dwarf package started with. var dat = map[string][]byte{"abbrev": nil, "info": nil, "str": nil, "line": nil, "ranges": nil} for i, s := range f.Sections { suffix := dwarfSuffix(s) if suffix == "" { continue } if _, ok := dat[suffix]; !ok { continue } b, err := sectionData(i, s) if err != nil { return nil, err } dat[suffix] = b } d, err := dwarf.New(dat["abbrev"], nil, nil, dat["info"], dat["line"], nil, dat["ranges"], dat["str"]) if err != nil { return nil, err } // Look for DWARF4 .debug_types sections and DWARF5 sections. for i, s := range f.Sections { suffix := dwarfSuffix(s) if suffix == "" { continue } if _, ok := dat[suffix]; ok { // Already handled. 
continue } b, err := sectionData(i, s) if err != nil { return nil, err } if suffix == "types" { if err := d.AddTypes(fmt.Sprintf("types-%d", i), b); err != nil { return nil, err } } else { if err := d.AddSection(".debug_"+suffix, b); err != nil { return nil, err } } } return d, nil } // Symbols returns the symbol table for f. The symbols will be listed in the order // they appear in f. // // For compatibility with Go 1.0, Symbols omits the null symbol at index 0. // After retrieving the symbols as symtab, an externally supplied index x // corresponds to symtab[x-1], not symtab[x]. func (f *File) Symbols() ([]Symbol, error) { sym, _, err := f.getSymbols(SHT_SYMTAB) return sym, err } // DynamicSymbols returns the dynamic symbol table for f. The symbols // will be listed in the order they appear in f. // // If f has a symbol version table, the returned [File.Symbols] will have // initialized Version and Library fields. // // For compatibility with [File.Symbols], [File.DynamicSymbols] omits the null symbol at index 0. // After retrieving the symbols as symtab, an externally supplied index x // corresponds to symtab[x-1], not symtab[x]. func (f *File) DynamicSymbols() ([]Symbol, error) { sym, str, err := f.getSymbols(SHT_DYNSYM) if err != nil { return nil, err } if f.gnuVersionInit(str) { for i := range sym { sym[i].Library, sym[i].Version = f.gnuVersion(i) } } return sym, nil } type ImportedSymbol struct { Name string Version string Library string } // ImportedSymbols returns the names of all symbols // referred to by the binary f that are expected to be // satisfied by other libraries at dynamic load time. // It does not return weak symbols. func (f *File) ImportedSymbols() ([]ImportedSymbol, error) { sym, str, err := f.getSymbols(SHT_DYNSYM) if err != nil { return nil, err } f.gnuVersionInit(str) var all []ImportedSymbol for i, s := range sym { if ST_BIND(s.Info) == STB_GLOBAL && s.Section == SHN_UNDEF { all = append(all, ImportedSymbol{Name: s.Name}) sym := &all[len(all)-1] sym.Library, sym.Version = f.gnuVersion(i) } } return all, nil } type verneed struct { File string Name string } // gnuVersionInit parses the GNU version tables // for use by calls to gnuVersion. func (f *File) gnuVersionInit(str []byte) bool { if f.gnuNeed != nil { // Already initialized return true } // Accumulate verneed information. vn := f.SectionByType(SHT_GNU_VERNEED) if vn == nil { return false } d, _ := vn.Data() var need []verneed i := 0 for { if i+16 > len(d) { break } vers := f.ByteOrder.Uint16(d[i : i+2]) if vers != 1 { break } cnt := f.ByteOrder.Uint16(d[i+2 : i+4]) fileoff := f.ByteOrder.Uint32(d[i+4 : i+8]) aux := f.ByteOrder.Uint32(d[i+8 : i+12]) next := f.ByteOrder.Uint32(d[i+12 : i+16]) file, _ := getString(str, int(fileoff)) var name string j := i + int(aux) for c := 0; c < int(cnt); c++ { if j+16 > len(d) { break } // hash := f.ByteOrder.Uint32(d[j:j+4]) // flags := f.ByteOrder.Uint16(d[j+4:j+6]) other := f.ByteOrder.Uint16(d[j+6 : j+8]) nameoff := f.ByteOrder.Uint32(d[j+8 : j+12]) next := f.ByteOrder.Uint32(d[j+12 : j+16]) name, _ = getString(str, int(nameoff)) ndx := int(other) if ndx >= len(need) { a := make([]verneed, 2*(ndx+1)) copy(a, need) need = a } need[ndx] = verneed{file, name} if next == 0 { break } j += int(next) } if next == 0 { break } i += int(next) } // Versym parallels symbol table, indexing into verneed. 
vs := f.SectionByType(SHT_GNU_VERSYM) if vs == nil { return false } d, _ = vs.Data() f.gnuNeed = need f.gnuVersym = d return true } // gnuVersion adds Library and Version information to sym, // which came from offset i of the symbol table. func (f *File) gnuVersion(i int) (library string, version string) { // Each entry is two bytes; skip undef entry at beginning. i = (i + 1) * 2 if i >= len(f.gnuVersym) { return } s := f.gnuVersym[i:] if len(s) < 2 { return } j := int(f.ByteOrder.Uint16(s)) if j < 2 || j >= len(f.gnuNeed) { return } n := &f.gnuNeed[j] return n.File, n.Name } // ImportedLibraries returns the names of all libraries // referred to by the binary f that are expected to be // linked with the binary at dynamic link time. func (f *File) ImportedLibraries() ([]string, error) { return f.DynString(DT_NEEDED) } // DynString returns the strings listed for the given tag in the file's dynamic // section. // // The tag must be one that takes string values: [DT_NEEDED], [DT_SONAME], [DT_RPATH], or // [DT_RUNPATH]. func (f *File) DynString(tag DynTag) ([]string, error) { switch tag { case DT_NEEDED, DT_SONAME, DT_RPATH, DT_RUNPATH: default: return nil, fmt.Errorf("non-string-valued tag %v", tag) } ds := f.SectionByType(SHT_DYNAMIC) if ds == nil { // not dynamic, so no libraries return nil, nil } d, err := ds.Data() if err != nil { return nil, err } dynSize := 8 if f.Class == ELFCLASS64 { dynSize = 16 } if len(d)%dynSize != 0 { return nil, errors.New("length of dynamic section is not a multiple of dynamic entry size") } str, err := f.stringTable(ds.Link) if err != nil { return nil, err } var all []string for len(d) > 0 { var t DynTag var v uint64 switch f.Class { case ELFCLASS32: t = DynTag(f.ByteOrder.Uint32(d[0:4])) v = uint64(f.ByteOrder.Uint32(d[4:8])) d = d[8:] case ELFCLASS64: t = DynTag(f.ByteOrder.Uint64(d[0:8])) v = f.ByteOrder.Uint64(d[8:16]) d = d[16:] } if t == tag { s, ok := getString(str, int(v)) if ok { all = append(all, s) } } } return all, nil } // DynValue returns the values listed for the given tag in the file's dynamic // section. func (f *File) DynValue(tag DynTag) ([]uint64, error) { ds := f.SectionByType(SHT_DYNAMIC) if ds == nil { return nil, nil } d, err := ds.Data() if err != nil { return nil, err } dynSize := 8 if f.Class == ELFCLASS64 { dynSize = 16 } if len(d)%dynSize != 0 { return nil, errors.New("length of dynamic section is not a multiple of dynamic entry size") } // Parse the .dynamic section as a string of bytes. var vals []uint64 for len(d) > 0 { var t DynTag var v uint64 switch f.Class { case ELFCLASS32: t = DynTag(f.ByteOrder.Uint32(d[0:4])) v = uint64(f.ByteOrder.Uint32(d[4:8])) d = d[8:] case ELFCLASS64: t = DynTag(f.ByteOrder.Uint64(d[0:8])) v = f.ByteOrder.Uint64(d[8:16]) d = d[16:] } if t == tag { vals = append(vals, v) } } return vals, nil } type nobitsSectionReader struct{} func (*nobitsSectionReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, errors.New("unexpected read from SHT_NOBITS section") }
go/src/debug/elf/file.go/0
{ "file_path": "go/src/debug/elf/file.go", "repo_id": "go", "token_count": 18878 }
233
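A short sketch of the public debug/elf API defined in the file above (Open, Sections, Symbols, ImportedLibraries); the input path and output formatting are illustrative assumptions.

// elf_inspect.go - illustrative sketch; assumes an ELF binary path as argument.
package main

import (
	"debug/elf"
	"errors"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fmt.Println("class:", f.Class, "machine:", f.Machine, "type:", f.Type)

	// Section headers.
	for _, s := range f.Sections {
		fmt.Printf("%-24s %-12v size=%d\n", s.Name, s.Type, s.Size)
	}

	// Dynamically linked libraries (DT_NEEDED entries).
	if libs, err := f.ImportedLibraries(); err == nil {
		fmt.Println("DT_NEEDED:", libs)
	}

	// Static symbol table, if present.
	syms, err := f.Symbols()
	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
		log.Fatal(err)
	}
	for _, sym := range syms {
		fmt.Printf("%#016x %s\n", sym.Value, sym.Name)
	}
}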
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gosym import ( "bytes" "compress/gzip" "debug/elf" "internal/testenv" "io" "os" "os/exec" "path/filepath" "runtime" "strings" "testing" ) var ( pclineTempDir string pclinetestBinary string ) func dotest(t *testing.T) { testenv.MustHaveGoBuild(t) // For now, only works on amd64 platforms. if runtime.GOARCH != "amd64" { t.Skipf("skipping on non-AMD64 system %s", runtime.GOARCH) } // This test builds a Linux/AMD64 binary. Skipping in short mode if cross compiling. if runtime.GOOS != "linux" && testing.Short() { t.Skipf("skipping in short mode on non-Linux system %s", runtime.GOARCH) } var err error pclineTempDir, err = os.MkdirTemp("", "pclinetest") if err != nil { t.Fatal(err) } pclinetestBinary = filepath.Join(pclineTempDir, "pclinetest") cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", pclinetestBinary) cmd.Dir = "testdata" cmd.Env = append(os.Environ(), "GOOS=linux") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { t.Fatal(err) } } func endtest() { if pclineTempDir != "" { os.RemoveAll(pclineTempDir) pclineTempDir = "" pclinetestBinary = "" } } // skipIfNotELF skips the test if we are not running on an ELF system. // These tests open and examine the test binary, and use elf.Open to do so. func skipIfNotELF(t *testing.T) { switch runtime.GOOS { case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris", "illumos": // OK. default: t.Skipf("skipping on non-ELF system %s", runtime.GOOS) } } func getTable(t *testing.T) *Table { f, tab := crack(os.Args[0], t) f.Close() return tab } func crack(file string, t *testing.T) (*elf.File, *Table) { // Open self f, err := elf.Open(file) if err != nil { t.Fatal(err) } return parse(file, f, t) } func parse(file string, f *elf.File, t *testing.T) (*elf.File, *Table) { s := f.Section(".gosymtab") if s == nil { t.Skip("no .gosymtab section") } symdat, err := s.Data() if err != nil { f.Close() t.Fatalf("reading %s gosymtab: %v", file, err) } pclndat, err := f.Section(".gopclntab").Data() if err != nil { f.Close() t.Fatalf("reading %s gopclntab: %v", file, err) } pcln := NewLineTable(pclndat, f.Section(".text").Addr) tab, err := NewTable(symdat, pcln) if err != nil { f.Close() t.Fatalf("parsing %s gosymtab: %v", file, err) } return f, tab } func TestLineFromAline(t *testing.T) { skipIfNotELF(t) tab := getTable(t) if tab.go12line != nil { // aline's don't exist in the Go 1.2 table. 
t.Skip("not relevant to Go 1.2 symbol table") } // Find the sym package pkg := tab.LookupFunc("debug/gosym.TestLineFromAline").Obj if pkg == nil { t.Fatalf("nil pkg") } // Walk every absolute line and ensure that we hit every // source line monotonically lastline := make(map[string]int) final := -1 for i := 0; i < 10000; i++ { path, line := pkg.lineFromAline(i) // Check for end of object if path == "" { if final == -1 { final = i - 1 } continue } else if final != -1 { t.Fatalf("reached end of package at absolute line %d, but absolute line %d mapped to %s:%d", final, i, path, line) } // It's okay to see files multiple times (e.g., sys.a) if line == 1 { lastline[path] = 1 continue } // Check that the is the next line in path ll, ok := lastline[path] if !ok { t.Errorf("file %s starts on line %d", path, line) } else if line != ll+1 { t.Fatalf("expected next line of file %s to be %d, got %d", path, ll+1, line) } lastline[path] = line } if final == -1 { t.Errorf("never reached end of object") } } func TestLineAline(t *testing.T) { skipIfNotELF(t) tab := getTable(t) if tab.go12line != nil { // aline's don't exist in the Go 1.2 table. t.Skip("not relevant to Go 1.2 symbol table") } for _, o := range tab.Files { // A source file can appear multiple times in a // object. alineFromLine will always return alines in // the first file, so track which lines we've seen. found := make(map[string]int) for i := 0; i < 1000; i++ { path, line := o.lineFromAline(i) if path == "" { break } // cgo files are full of 'Z' symbols, which we don't handle if len(path) > 4 && path[len(path)-4:] == ".cgo" { continue } if minline, ok := found[path]; path != "" && ok { if minline >= line { // We've already covered this file continue } } found[path] = line a, err := o.alineFromLine(path, line) if err != nil { t.Errorf("absolute line %d in object %s maps to %s:%d, but mapping that back gives error %s", i, o.Paths[0].Name, path, line, err) } else if a != i { t.Errorf("absolute line %d in object %s maps to %s:%d, which maps back to absolute line %d\n", i, o.Paths[0].Name, path, line, a) } } } } func TestPCLine(t *testing.T) { dotest(t) defer endtest() f, tab := crack(pclinetestBinary, t) defer f.Close() text := f.Section(".text") textdat, err := text.Data() if err != nil { t.Fatalf("reading .text: %v", err) } // Test PCToLine sym := tab.LookupFunc("main.linefrompc") wantLine := 0 for pc := sym.Entry; pc < sym.End; pc++ { off := pc - text.Addr // TODO(rsc): should not need off; bug in 8g if textdat[off] == 255 { break } wantLine += int(textdat[off]) t.Logf("off is %d %#x (max %d)", off, textdat[off], sym.End-pc) file, line, fn := tab.PCToLine(pc) if fn == nil { t.Errorf("failed to get line of PC %#x", pc) } else if !strings.HasSuffix(file, "pclinetest.s") || line != wantLine || fn != sym { t.Errorf("PCToLine(%#x) = %s:%d (%s), want %s:%d (%s)", pc, file, line, fn.Name, "pclinetest.s", wantLine, sym.Name) } } // Test LineToPC sym = tab.LookupFunc("main.pcfromline") lookupline := -1 wantLine = 0 off := uint64(0) // TODO(rsc): should not need off; bug in 8g for pc := sym.Value; pc < sym.End; pc += 2 + uint64(textdat[off]) { file, line, fn := tab.PCToLine(pc) off = pc - text.Addr if textdat[off] == 255 { break } wantLine += int(textdat[off]) if line != wantLine { t.Errorf("expected line %d at PC %#x in pcfromline, got %d", wantLine, pc, line) off = pc + 1 - text.Addr continue } if lookupline == -1 { lookupline = line } for ; lookupline <= line; lookupline++ { pc2, fn2, err := tab.LineToPC(file, lookupline) if lookupline != line { 
// Should be nothing on this line if err == nil { t.Errorf("expected no PC at line %d, got %#x (%s)", lookupline, pc2, fn2.Name) } } else if err != nil { t.Errorf("failed to get PC of line %d: %s", lookupline, err) } else if pc != pc2 { t.Errorf("expected PC %#x (%s) at line %d, got PC %#x (%s)", pc, fn.Name, line, pc2, fn2.Name) } } off = pc + 1 - text.Addr } } func TestSymVersion(t *testing.T) { skipIfNotELF(t) table := getTable(t) if table.go12line == nil { t.Skip("not relevant to Go 1.2+ symbol table") } for _, fn := range table.Funcs { if fn.goVersion == verUnknown { t.Fatalf("unexpected symbol version: %v", fn) } } } // read115Executable returns a hello world executable compiled by Go 1.15. // // The file was compiled in /tmp/hello.go: // // package main // // func main() { // println("hello") // } func read115Executable(tb testing.TB) []byte { zippedDat, err := os.ReadFile("testdata/pcln115.gz") if err != nil { tb.Fatal(err) } var gzReader *gzip.Reader gzReader, err = gzip.NewReader(bytes.NewBuffer(zippedDat)) if err != nil { tb.Fatal(err) } var dat []byte dat, err = io.ReadAll(gzReader) if err != nil { tb.Fatal(err) } return dat } // Test that we can parse a pclntab from 1.15. func Test115PclnParsing(t *testing.T) { dat := read115Executable(t) const textStart = 0x1001000 pcln := NewLineTable(dat, textStart) tab, err := NewTable(nil, pcln) if err != nil { t.Fatal(err) } var f *Func var pc uint64 pc, f, err = tab.LineToPC("/tmp/hello.go", 3) if err != nil { t.Fatal(err) } if pcln.version != ver12 { t.Fatal("Expected pcln to parse as an older version") } if pc != 0x105c280 { t.Fatalf("expect pc = 0x105c280, got 0x%x", pc) } if f.Name != "main.main" { t.Fatalf("expected to parse name as main.main, got %v", f.Name) } } var ( sinkLineTable *LineTable sinkTable *Table ) func Benchmark115(b *testing.B) { dat := read115Executable(b) const textStart = 0x1001000 b.Run("NewLineTable", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { sinkLineTable = NewLineTable(dat, textStart) } }) pcln := NewLineTable(dat, textStart) b.Run("NewTable", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { var err error sinkTable, err = NewTable(nil, pcln) if err != nil { b.Fatal(err) } } }) tab, err := NewTable(nil, pcln) if err != nil { b.Fatal(err) } b.Run("LineToPC", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { var f *Func var pc uint64 pc, f, err = tab.LineToPC("/tmp/hello.go", 3) if err != nil { b.Fatal(err) } if pcln.version != ver12 { b.Fatalf("want version=%d, got %d", ver12, pcln.version) } if pc != 0x105c280 { b.Fatalf("want pc=0x105c280, got 0x%x", pc) } if f.Name != "main.main" { b.Fatalf("want name=main.main, got %q", f.Name) } } }) b.Run("PCToLine", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { file, line, fn := tab.PCToLine(0x105c280) if file != "/tmp/hello.go" { b.Fatalf("want name=/tmp/hello.go, got %q", file) } if line != 3 { b.Fatalf("want line=3, got %d", line) } if fn.Name != "main.main" { b.Fatalf("want name=main.main, got %q", fn.Name) } } }) }
go/src/debug/gosym/pclntab_test.go/0
{ "file_path": "go/src/debug/gosym/pclntab_test.go", "repo_id": "go", "token_count": 4319 }
234
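A sketch of the debug/gosym flow the tests above exercise: build a LineTable from .gopclntab, wrap it in a Table, and map between program counters and source lines. The binary path is an illustrative assumption; .gosymtab is typically empty in modern binaries, which NewTable accepts.

// gosym_lookup.go - illustrative sketch; assumes a Go-built ELF binary path.
package main

import (
	"debug/elf"
	"debug/gosym"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	text := f.Section(".text")
	pclntab := f.Section(".gopclntab")
	if text == nil || pclntab == nil {
		log.Fatal("missing .text or .gopclntab section")
	}
	pclndat, err := pclntab.Data()
	if err != nil {
		log.Fatal(err)
	}

	// .gosymtab may be absent or empty; NewTable tolerates a nil symtab.
	var symdat []byte
	if s := f.Section(".gosymtab"); s != nil {
		symdat, _ = s.Data()
	}

	tab, err := gosym.NewTable(symdat, gosym.NewLineTable(pclndat, text.Addr))
	if err != nil {
		log.Fatal(err)
	}

	if fn := tab.LookupFunc("main.main"); fn != nil {
		file, line, _ := tab.PCToLine(fn.Entry)
		fmt.Printf("main.main starts at %s:%d (pc %#x)\n", file, line, fn.Entry)
	}
}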
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package embedtest import ( "embed" "io" "reflect" "slices" "testing" "testing/fstest" ) //go:embed testdata/h*.txt //go:embed c*.txt testdata/g*.txt var global embed.FS //go:embed c*txt var concurrency string //go:embed testdata/g*.txt var glass []byte func testFiles(t *testing.T, f embed.FS, name, data string) { t.Helper() d, err := f.ReadFile(name) if err != nil { t.Error(err) return } if string(d) != data { t.Errorf("read %v = %q, want %q", name, d, data) } } func testString(t *testing.T, s, name, data string) { t.Helper() if s != data { t.Errorf("%v = %q, want %q", name, s, data) } } func testDir(t *testing.T, f embed.FS, name string, expect ...string) { t.Helper() dirs, err := f.ReadDir(name) if err != nil { t.Error(err) return } var names []string for _, d := range dirs { name := d.Name() if d.IsDir() { name += "/" } names = append(names, name) } if !slices.Equal(names, expect) { t.Errorf("readdir %v = %v, want %v", name, names, expect) } } // Tests for issue 49514. var _ = '"' var _ = '\'' var _ = '🦆' func TestGlobal(t *testing.T) { testFiles(t, global, "concurrency.txt", "Concurrency is not parallelism.\n") testFiles(t, global, "testdata/hello.txt", "hello, world\n") testFiles(t, global, "testdata/glass.txt", "I can eat glass and it doesn't hurt me.\n") if err := fstest.TestFS(global, "concurrency.txt", "testdata/hello.txt"); err != nil { t.Fatal(err) } testString(t, concurrency, "concurrency", "Concurrency is not parallelism.\n") testString(t, string(glass), "glass", "I can eat glass and it doesn't hurt me.\n") } //go:embed testdata var testDirAll embed.FS func TestDir(t *testing.T) { all := testDirAll testFiles(t, all, "testdata/hello.txt", "hello, world\n") testFiles(t, all, "testdata/i/i18n.txt", "internationalization\n") testFiles(t, all, "testdata/i/j/k/k8s.txt", "kubernetes\n") testFiles(t, all, "testdata/ken.txt", "If a program is too slow, it must have a loop.\n") testDir(t, all, ".", "testdata/") testDir(t, all, "testdata/i", "i18n.txt", "j/") testDir(t, all, "testdata/i/j", "k/") testDir(t, all, "testdata/i/j/k", "k8s.txt") } var ( //go:embed testdata testHiddenDir embed.FS //go:embed testdata/* testHiddenStar embed.FS ) func TestHidden(t *testing.T) { dir := testHiddenDir star := testHiddenStar t.Logf("//go:embed testdata") testDir(t, dir, "testdata", "-not-hidden/", "ascii.txt", "glass.txt", "hello.txt", "i/", "ken.txt") t.Logf("//go:embed testdata/*") testDir(t, star, "testdata", "-not-hidden/", ".hidden/", "_hidden/", "ascii.txt", "glass.txt", "hello.txt", "i/", "ken.txt") testDir(t, star, "testdata/.hidden", "fortune.txt", "more/") // but not .more or _more } func TestUninitialized(t *testing.T) { var uninitialized embed.FS testDir(t, uninitialized, ".") f, err := uninitialized.Open(".") if err != nil { t.Fatal(err) } defer f.Close() fi, err := f.Stat() if err != nil { t.Fatal(err) } if !fi.IsDir() { t.Errorf("in uninitialized embed.FS, . 
is not a directory") } } var ( //go:embed "testdata/hello.txt" helloT []T //go:embed "testdata/hello.txt" helloUint8 []uint8 //go:embed "testdata/hello.txt" helloEUint8 []EmbedUint8 //go:embed "testdata/hello.txt" helloBytes EmbedBytes //go:embed "testdata/hello.txt" helloString EmbedString ) type T byte type EmbedUint8 uint8 type EmbedBytes []byte type EmbedString string // golang.org/issue/47735 func TestAliases(t *testing.T) { all := testDirAll want, e := all.ReadFile("testdata/hello.txt") if e != nil { t.Fatal("ReadFile:", e) } check := func(g any) { got := reflect.ValueOf(g) for i := 0; i < got.Len(); i++ { if byte(got.Index(i).Uint()) != want[i] { t.Fatalf("got %v want %v", got.Bytes(), want) } } } check(helloT) check(helloUint8) check(helloEUint8) check(helloBytes) check(helloString) } func TestOffset(t *testing.T) { file, err := testDirAll.Open("testdata/hello.txt") if err != nil { t.Fatal("Open:", err) } want := "hello, world\n" // Read the entire file. got := make([]byte, len(want)) n, err := file.Read(got) if err != nil { t.Fatal("Read:", err) } if n != len(want) { t.Fatal("Read:", n) } if string(got) != want { t.Fatalf("Read: %q", got) } // Try to read one byte; confirm we're at the EOF. var buf [1]byte n, err = file.Read(buf[:]) if err != io.EOF { t.Fatal("Read:", err) } if n != 0 { t.Fatal("Read:", n) } // Use seek to get the offset at the EOF. seeker := file.(io.Seeker) off, err := seeker.Seek(0, io.SeekCurrent) if err != nil { t.Fatal("Seek:", err) } if off != int64(len(want)) { t.Fatal("Seek:", off) } // Use ReadAt to read the entire file, ignoring the offset. at := file.(io.ReaderAt) got = make([]byte, len(want)) n, err = at.ReadAt(got, 0) if err != nil { t.Fatal("ReadAt:", err) } if n != len(want) { t.Fatalf("ReadAt: got %d bytes, want %d bytes", n, len(want)) } if string(got) != want { t.Fatalf("ReadAt: got %q, want %q", got, want) } // Use ReadAt with non-zero offset. off = int64(7) want = want[off:] got = make([]byte, len(want)) n, err = at.ReadAt(got, off) if err != nil { t.Fatal("ReadAt:", err) } if n != len(want) { t.Fatalf("ReadAt: got %d bytes, want %d bytes", n, len(want)) } if string(got) != want { t.Fatalf("ReadAt: got %q, want %q", got, want) } }
go/src/embed/internal/embedtest/embed_test.go/0
{ "file_path": "go/src/embed/internal/embedtest/embed_test.go", "repo_id": "go", "token_count": 2375 }
235
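A minimal sketch of how the //go:embed directives exercised by the tests above are typically used, assuming a file named hello.txt and a directory named static exist in the package directory at build time (both names are invented for this illustration):

package main

import (
	"embed"
	"fmt"
)

// The embedded paths below are assumptions for this sketch; they must
// exist in the package directory or the program will not compile.

//go:embed hello.txt
var hello string

//go:embed static/*
var static embed.FS

func main() {
	// A string (or []byte) variable receives the file contents directly.
	fmt.Print(hello)

	// An embed.FS behaves like a read-only file system rooted at the
	// package directory, so entries keep the "static/" prefix.
	entries, err := static.ReadDir("static")
	if err != nil {
		fmt.Println("ReadDir:", err)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}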
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package asn1 implements parsing of DER-encoded ASN.1 data structures, // as defined in ITU-T Rec X.690. // // See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,” // http://luca.ntop.org/Teaching/Appunti/asn1.html. package asn1 // ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc // are different encoding formats for those objects. Here, we'll be dealing // with DER, the Distinguished Encoding Rules. DER is used in X.509 because // it's fast to parse and, unlike BER, has a unique encoding for every object. // When calculating hashes over objects, it's important that the resulting // bytes be the same at both ends and DER removes this margin of error. // // ASN.1 is very complex and this package doesn't attempt to implement // everything by any means. import ( "errors" "fmt" "math" "math/big" "reflect" "strconv" "strings" "time" "unicode/utf16" "unicode/utf8" ) // A StructuralError suggests that the ASN.1 data is valid, but the Go type // which is receiving it doesn't match. type StructuralError struct { Msg string } func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } // A SyntaxError suggests that the ASN.1 data is invalid. type SyntaxError struct { Msg string } func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } // We start by dealing with each of the primitive types in turn. // BOOLEAN func parseBool(bytes []byte) (ret bool, err error) { if len(bytes) != 1 { err = SyntaxError{"invalid boolean"} return } // DER demands that "If the encoding represents the boolean value TRUE, // its single contents octet shall have all eight bits set to one." // Thus only 0 and 255 are valid encoded values. switch bytes[0] { case 0: ret = false case 0xff: ret = true default: err = SyntaxError{"invalid boolean"} } return } // INTEGER // checkInteger returns nil if the given bytes are a valid DER-encoded // INTEGER and an error otherwise. func checkInteger(bytes []byte) error { if len(bytes) == 0 { return StructuralError{"empty integer"} } if len(bytes) == 1 { return nil } if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { return StructuralError{"integer not minimally-encoded"} } return nil } // parseInt64 treats the given bytes as a big-endian, signed integer and // returns the result. func parseInt64(bytes []byte) (ret int64, err error) { err = checkInteger(bytes) if err != nil { return } if len(bytes) > 8 { // We'll overflow an int64 in this case. err = StructuralError{"integer too large"} return } for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { ret <<= 8 ret |= int64(bytes[bytesRead]) } // Shift up and down in order to sign extend the result. ret <<= 64 - uint8(len(bytes))*8 ret >>= 64 - uint8(len(bytes))*8 return } // parseInt32 treats the given bytes as a big-endian, signed integer and returns // the result. func parseInt32(bytes []byte) (int32, error) { if err := checkInteger(bytes); err != nil { return 0, err } ret64, err := parseInt64(bytes) if err != nil { return 0, err } if ret64 != int64(int32(ret64)) { return 0, StructuralError{"integer too large"} } return int32(ret64), nil } var bigOne = big.NewInt(1) // parseBigInt treats the given bytes as a big-endian, signed integer and returns // the result. 
func parseBigInt(bytes []byte) (*big.Int, error) { if err := checkInteger(bytes); err != nil { return nil, err } ret := new(big.Int) if len(bytes) > 0 && bytes[0]&0x80 == 0x80 { // This is a negative number. notBytes := make([]byte, len(bytes)) for i := range notBytes { notBytes[i] = ^bytes[i] } ret.SetBytes(notBytes) ret.Add(ret, bigOne) ret.Neg(ret) return ret, nil } ret.SetBytes(bytes) return ret, nil } // BIT STRING // BitString is the structure to use when you want an ASN.1 BIT STRING type. A // bit string is padded up to the nearest byte in memory and the number of // valid bits is recorded. Padding bits will be zero. type BitString struct { Bytes []byte // bits packed into bytes. BitLength int // length in bits. } // At returns the bit at the given index. If the index is out of range it // returns 0. func (b BitString) At(i int) int { if i < 0 || i >= b.BitLength { return 0 } x := i / 8 y := 7 - uint(i%8) return int(b.Bytes[x]>>y) & 1 } // RightAlign returns a slice where the padding bits are at the beginning. The // slice may share memory with the BitString. func (b BitString) RightAlign() []byte { shift := uint(8 - (b.BitLength % 8)) if shift == 8 || len(b.Bytes) == 0 { return b.Bytes } a := make([]byte, len(b.Bytes)) a[0] = b.Bytes[0] >> shift for i := 1; i < len(b.Bytes); i++ { a[i] = b.Bytes[i-1] << (8 - shift) a[i] |= b.Bytes[i] >> shift } return a } // parseBitString parses an ASN.1 bit string from the given byte slice and returns it. func parseBitString(bytes []byte) (ret BitString, err error) { if len(bytes) == 0 { err = SyntaxError{"zero length BIT STRING"} return } paddingBits := int(bytes[0]) if paddingBits > 7 || len(bytes) == 1 && paddingBits > 0 || bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 { err = SyntaxError{"invalid padding bits in BIT STRING"} return } ret.BitLength = (len(bytes)-1)*8 - paddingBits ret.Bytes = bytes[1:] return } // NULL // NullRawValue is a [RawValue] with its Tag set to the ASN.1 NULL type tag (5). var NullRawValue = RawValue{Tag: TagNull} // NullBytes contains bytes representing the DER-encoded ASN.1 NULL type. var NullBytes = []byte{TagNull, 0} // OBJECT IDENTIFIER // An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER. type ObjectIdentifier []int // Equal reports whether oi and other represent the same identifier. func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool { if len(oi) != len(other) { return false } for i := 0; i < len(oi); i++ { if oi[i] != other[i] { return false } } return true } func (oi ObjectIdentifier) String() string { var s strings.Builder s.Grow(32) buf := make([]byte, 0, 19) for i, v := range oi { if i > 0 { s.WriteByte('.') } s.Write(strconv.AppendInt(buf, int64(v), 10)) } return s.String() } // parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and // returns it. An object identifier is a sequence of variable length integers // that are assigned in a hierarchy. func parseObjectIdentifier(bytes []byte) (s ObjectIdentifier, err error) { if len(bytes) == 0 { err = SyntaxError{"zero length OBJECT IDENTIFIER"} return } // In the worst case, we get two elements from the first byte (which is // encoded differently) and then every varint is a single byte long. s = make([]int, len(bytes)+1) // The first varint is 40*value1 + value2: // According to this packing, value1 can take the values 0, 1 and 2 only. // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, // then there are no restrictions on value2. 
v, offset, err := parseBase128Int(bytes, 0) if err != nil { return } if v < 80 { s[0] = v / 40 s[1] = v % 40 } else { s[0] = 2 s[1] = v - 80 } i := 2 for ; offset < len(bytes); i++ { v, offset, err = parseBase128Int(bytes, offset) if err != nil { return } s[i] = v } s = s[0:i] return } // ENUMERATED // An Enumerated is represented as a plain int. type Enumerated int // FLAG // A Flag accepts any data and is set to true if present. type Flag bool // parseBase128Int parses a base-128 encoded int from the given offset in the // given byte slice. It returns the value and the new offset. func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { offset = initOffset var ret64 int64 for shifted := 0; offset < len(bytes); shifted++ { // 5 * 7 bits per byte == 35 bits of data // Thus the representation is either non-minimal or too large for an int32 if shifted == 5 { err = StructuralError{"base 128 integer too large"} return } ret64 <<= 7 b := bytes[offset] // integers should be minimally encoded, so the leading octet should // never be 0x80 if shifted == 0 && b == 0x80 { err = SyntaxError{"integer is not minimally encoded"} return } ret64 |= int64(b & 0x7f) offset++ if b&0x80 == 0 { ret = int(ret64) // Ensure that the returned value fits in an int on all platforms if ret64 > math.MaxInt32 { err = StructuralError{"base 128 integer too large"} } return } } err = SyntaxError{"truncated base 128 integer"} return } // UTCTime func parseUTCTime(bytes []byte) (ret time.Time, err error) { s := string(bytes) formatStr := "0601021504Z0700" ret, err = time.Parse(formatStr, s) if err != nil { formatStr = "060102150405Z0700" ret, err = time.Parse(formatStr, s) } if err != nil { return } if serialized := ret.Format(formatStr); serialized != s { err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) return } if ret.Year() >= 2050 { // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 ret = ret.AddDate(-100, 0, 0) } return } // parseGeneralizedTime parses the GeneralizedTime from the given byte slice // and returns the resulting time. func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { const formatStr = "20060102150405.999999999Z0700" s := string(bytes) if ret, err = time.Parse(formatStr, s); err != nil { return } if serialized := ret.Format(formatStr); serialized != s { err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) } return } // NumericString // parseNumericString parses an ASN.1 NumericString from the given byte array // and returns it. func parseNumericString(bytes []byte) (ret string, err error) { for _, b := range bytes { if !isNumeric(b) { return "", SyntaxError{"NumericString contains invalid character"} } } return string(bytes), nil } // isNumeric reports whether the given b is in the ASN.1 NumericString set. func isNumeric(b byte) bool { return '0' <= b && b <= '9' || b == ' ' } // PrintableString // parsePrintableString parses an ASN.1 PrintableString from the given byte // array and returns it. 
func parsePrintableString(bytes []byte) (ret string, err error) { for _, b := range bytes { if !isPrintable(b, allowAsterisk, allowAmpersand) { err = SyntaxError{"PrintableString contains invalid character"} return } } ret = string(bytes) return } type asteriskFlag bool type ampersandFlag bool const ( allowAsterisk asteriskFlag = true rejectAsterisk asteriskFlag = false allowAmpersand ampersandFlag = true rejectAmpersand ampersandFlag = false ) // isPrintable reports whether the given b is in the ASN.1 PrintableString set. // If asterisk is allowAsterisk then '*' is also allowed, reflecting existing // practice. If ampersand is allowAmpersand then '&' is allowed as well. func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool { return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' || '\'' <= b && b <= ')' || '+' <= b && b <= '/' || b == ' ' || b == ':' || b == '=' || b == '?' || // This is technically not allowed in a PrintableString. // However, x509 certificates with wildcard strings don't // always use the correct string type so we permit it. (bool(asterisk) && b == '*') || // This is not technically allowed either. However, not // only is it relatively common, but there are also a // handful of CA certificates that contain it. At least // one of which will not expire until 2027. (bool(ampersand) && b == '&') } // IA5String // parseIA5String parses an ASN.1 IA5String (ASCII string) from the given // byte slice and returns it. func parseIA5String(bytes []byte) (ret string, err error) { for _, b := range bytes { if b >= utf8.RuneSelf { err = SyntaxError{"IA5String contains invalid character"} return } } ret = string(bytes) return } // T61String // parseT61String parses an ASN.1 T61String (8-bit clean string) from the given // byte slice and returns it. func parseT61String(bytes []byte) (ret string, err error) { return string(bytes), nil } // UTF8String // parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte // array and returns it. func parseUTF8String(bytes []byte) (ret string, err error) { if !utf8.Valid(bytes) { return "", errors.New("asn1: invalid UTF-8 string") } return string(bytes), nil } // BMPString // parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of // ISO/IEC/ITU 10646-1) from the given byte slice and returns it. func parseBMPString(bmpString []byte) (string, error) { if len(bmpString)%2 != 0 { return "", errors.New("pkcs12: odd-length BMP string") } // Strip terminator if present. if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { bmpString = bmpString[:l-2] } s := make([]uint16, 0, len(bmpString)/2) for len(bmpString) > 0 { s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) bmpString = bmpString[2:] } return string(utf16.Decode(s)), nil } // A RawValue represents an undecoded ASN.1 object. type RawValue struct { Class, Tag int IsCompound bool Bytes []byte FullBytes []byte // includes the tag and length } // RawContent is used to signal that the undecoded, DER data needs to be // preserved for a struct. To use it, the first field of the struct must have // this type. It's an error for any of the other fields to have this type. type RawContent []byte // Tagging // parseTagAndLength parses an ASN.1 tag and length pair from the given offset // into a byte slice. It returns the parsed data and the new offset. 
SET and // SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we // don't distinguish between ordered and unordered objects in this code. func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { offset = initOffset // parseTagAndLength should not be called without at least a single // byte to read. Thus this check is for robustness: if offset >= len(bytes) { err = errors.New("asn1: internal error in parseTagAndLength") return } b := bytes[offset] offset++ ret.class = int(b >> 6) ret.isCompound = b&0x20 == 0x20 ret.tag = int(b & 0x1f) // If the bottom five bits are set, then the tag number is actually base 128 // encoded afterwards if ret.tag == 0x1f { ret.tag, offset, err = parseBase128Int(bytes, offset) if err != nil { return } // Tags should be encoded in minimal form. if ret.tag < 0x1f { err = SyntaxError{"non-minimal tag"} return } } if offset >= len(bytes) { err = SyntaxError{"truncated tag or length"} return } b = bytes[offset] offset++ if b&0x80 == 0 { // The length is encoded in the bottom 7 bits. ret.length = int(b & 0x7f) } else { // Bottom 7 bits give the number of length bytes to follow. numBytes := int(b & 0x7f) if numBytes == 0 { err = SyntaxError{"indefinite length found (not DER)"} return } ret.length = 0 for i := 0; i < numBytes; i++ { if offset >= len(bytes) { err = SyntaxError{"truncated tag or length"} return } b = bytes[offset] offset++ if ret.length >= 1<<23 { // We can't shift ret.length up without // overflowing. err = StructuralError{"length too large"} return } ret.length <<= 8 ret.length |= int(b) if ret.length == 0 { // DER requires that lengths be minimal. err = StructuralError{"superfluous leading zeros in length"} return } } // Short lengths must be encoded in short form. if ret.length < 0x80 { err = StructuralError{"non-minimal length"} return } } return } // parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse // a number of ASN.1 values from the given byte slice and returns them as a // slice of Go values of the given type. func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { matchAny, expectedTag, compoundType, ok := getUniversalType(elemType) if !ok { err = StructuralError{"unknown Go type for slice"} return } // First we iterate over the input and count the number of elements, // checking that the types are correct in each case. numElements := 0 for offset := 0; offset < len(bytes); { var t tagAndLength t, offset, err = parseTagAndLength(bytes, offset) if err != nil { return } switch t.tag { case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: // We pretend that various other string types are // PRINTABLE STRINGs so that a sequence of them can be // parsed into a []string. t.tag = TagPrintableString case TagGeneralizedTime, TagUTCTime: // Likewise, both time types are treated the same. 
t.tag = TagUTCTime } if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) { err = StructuralError{"sequence tag mismatch"} return } if invalidLength(offset, t.length, len(bytes)) { err = SyntaxError{"truncated sequence"} return } offset += t.length numElements++ } ret = reflect.MakeSlice(sliceType, numElements, numElements) params := fieldParameters{} offset := 0 for i := 0; i < numElements; i++ { offset, err = parseField(ret.Index(i), bytes, offset, params) if err != nil { return } } return } var ( bitStringType = reflect.TypeFor[BitString]() objectIdentifierType = reflect.TypeFor[ObjectIdentifier]() enumeratedType = reflect.TypeFor[Enumerated]() flagType = reflect.TypeFor[Flag]() timeType = reflect.TypeFor[time.Time]() rawValueType = reflect.TypeFor[RawValue]() rawContentsType = reflect.TypeFor[RawContent]() bigIntType = reflect.TypeFor[*big.Int]() ) // invalidLength reports whether offset + length > sliceLength, or if the // addition would overflow. func invalidLength(offset, length, sliceLength int) bool { return offset+length < offset || offset+length > sliceLength } // parseField is the main parsing function. Given a byte slice and an offset // into the array, it will try to parse a suitable ASN.1 value out and store it // in the given Value. func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { offset = initOffset fieldType := v.Type() // If we have run out of data, it may be that there are optional elements at the end. if offset == len(bytes) { if !setDefaultValue(v, params) { err = SyntaxError{"sequence truncated"} } return } // Deal with the ANY type. if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { var t tagAndLength t, offset, err = parseTagAndLength(bytes, offset) if err != nil { return } if invalidLength(offset, t.length, len(bytes)) { err = SyntaxError{"data truncated"} return } var result any if !t.isCompound && t.class == ClassUniversal { innerBytes := bytes[offset : offset+t.length] switch t.tag { case TagPrintableString: result, err = parsePrintableString(innerBytes) case TagNumericString: result, err = parseNumericString(innerBytes) case TagIA5String: result, err = parseIA5String(innerBytes) case TagT61String: result, err = parseT61String(innerBytes) case TagUTF8String: result, err = parseUTF8String(innerBytes) case TagInteger: result, err = parseInt64(innerBytes) case TagBitString: result, err = parseBitString(innerBytes) case TagOID: result, err = parseObjectIdentifier(innerBytes) case TagUTCTime: result, err = parseUTCTime(innerBytes) case TagGeneralizedTime: result, err = parseGeneralizedTime(innerBytes) case TagOctetString: result = innerBytes case TagBMPString: result, err = parseBMPString(innerBytes) default: // If we don't know how to handle the type, we just leave Value as nil. } } offset += t.length if err != nil { return } if result != nil { v.Set(reflect.ValueOf(result)) } return } t, offset, err := parseTagAndLength(bytes, offset) if err != nil { return } if params.explicit { expectedClass := ClassContextSpecific if params.application { expectedClass = ClassApplication } if offset == len(bytes) { err = StructuralError{"explicit tag has no child"} return } if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { if fieldType == rawValueType { // The inner element should not be parsed for RawValues. 
} else if t.length > 0 { t, offset, err = parseTagAndLength(bytes, offset) if err != nil { return } } else { if fieldType != flagType { err = StructuralError{"zero length explicit tag was not an asn1.Flag"} return } v.SetBool(true) return } } else { // The tags didn't match, it might be an optional element. ok := setDefaultValue(v, params) if ok { offset = initOffset } else { err = StructuralError{"explicitly tagged member didn't match"} } return } } matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType) if !ok1 { err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} return } // Special case for strings: all the ASN.1 string types map to the Go // type string. getUniversalType returns the tag for PrintableString // when it sees a string, so if we see a different string type on the // wire, we change the universal type to match. if universalTag == TagPrintableString { if t.class == ClassUniversal { switch t.tag { case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: universalTag = t.tag } } else if params.stringType != 0 { universalTag = params.stringType } } // Special case for time: UTCTime and GeneralizedTime both map to the // Go type time.Time. if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { universalTag = TagGeneralizedTime } if params.set { universalTag = TagSet } matchAnyClassAndTag := matchAny expectedClass := ClassUniversal expectedTag := universalTag if !params.explicit && params.tag != nil { expectedClass = ClassContextSpecific expectedTag = *params.tag matchAnyClassAndTag = false } if !params.explicit && params.application && params.tag != nil { expectedClass = ClassApplication expectedTag = *params.tag matchAnyClassAndTag = false } if !params.explicit && params.private && params.tag != nil { expectedClass = ClassPrivate expectedTag = *params.tag matchAnyClassAndTag = false } // We have unwrapped any explicit tagging at this point. if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) || (!matchAny && t.isCompound != compoundType) { // Tags don't match. Again, it could be an optional element. ok := setDefaultValue(v, params) if ok { offset = initOffset } else { err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} } return } if invalidLength(offset, t.length, len(bytes)) { err = SyntaxError{"data truncated"} return } innerBytes := bytes[offset : offset+t.length] offset += t.length // We deal with the structures defined in this package first. 
switch v := v.Addr().Interface().(type) { case *RawValue: *v = RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]} return case *ObjectIdentifier: *v, err = parseObjectIdentifier(innerBytes) return case *BitString: *v, err = parseBitString(innerBytes) return case *time.Time: if universalTag == TagUTCTime { *v, err = parseUTCTime(innerBytes) return } *v, err = parseGeneralizedTime(innerBytes) return case *Enumerated: parsedInt, err1 := parseInt32(innerBytes) if err1 == nil { *v = Enumerated(parsedInt) } err = err1 return case *Flag: *v = true return case **big.Int: parsedInt, err1 := parseBigInt(innerBytes) if err1 == nil { *v = parsedInt } err = err1 return } switch val := v; val.Kind() { case reflect.Bool: parsedBool, err1 := parseBool(innerBytes) if err1 == nil { val.SetBool(parsedBool) } err = err1 return case reflect.Int, reflect.Int32, reflect.Int64: if val.Type().Size() == 4 { parsedInt, err1 := parseInt32(innerBytes) if err1 == nil { val.SetInt(int64(parsedInt)) } err = err1 } else { parsedInt, err1 := parseInt64(innerBytes) if err1 == nil { val.SetInt(parsedInt) } err = err1 } return // TODO(dfc) Add support for the remaining integer types case reflect.Struct: structType := fieldType for i := 0; i < structType.NumField(); i++ { if !structType.Field(i).IsExported() { err = StructuralError{"struct contains unexported fields"} return } } if structType.NumField() > 0 && structType.Field(0).Type == rawContentsType { bytes := bytes[initOffset:offset] val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) } innerOffset := 0 for i := 0; i < structType.NumField(); i++ { field := structType.Field(i) if i == 0 && field.Type == rawContentsType { continue } innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) if err != nil { return } } // We allow extra bytes at the end of the SEQUENCE because // adding elements to the end has been used in X.509 as the // version numbers have increased. return case reflect.Slice: sliceType := fieldType if sliceType.Elem().Kind() == reflect.Uint8 { val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) reflect.Copy(val, reflect.ValueOf(innerBytes)) return } newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) if err1 == nil { val.Set(newSlice) } err = err1 return case reflect.String: var v string switch universalTag { case TagPrintableString: v, err = parsePrintableString(innerBytes) case TagNumericString: v, err = parseNumericString(innerBytes) case TagIA5String: v, err = parseIA5String(innerBytes) case TagT61String: v, err = parseT61String(innerBytes) case TagUTF8String: v, err = parseUTF8String(innerBytes) case TagGeneralString: // GeneralString is specified in ISO-2022/ECMA-35, // A brief review suggests that it includes structures // that allow the encoding to change midstring and // such. We give up and pass it as an 8-bit string. v, err = parseT61String(innerBytes) case TagBMPString: v, err = parseBMPString(innerBytes) default: err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} } if err == nil { val.SetString(v) } return } err = StructuralError{"unsupported: " + v.Type().String()} return } // canHaveDefaultValue reports whether k is a Kind that we will set a default // value for. (A signed integer, essentially.) 
func canHaveDefaultValue(k reflect.Kind) bool { switch k { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return true } return false } // setDefaultValue is used to install a default value, from a tag string, into // a Value. It is successful if the field was optional, even if a default value // wasn't provided or it failed to install it into the Value. func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { if !params.optional { return } ok = true if params.defaultValue == nil { return } if canHaveDefaultValue(v.Kind()) { v.SetInt(*params.defaultValue) } return } // Unmarshal parses the DER-encoded ASN.1 data structure b // and uses the reflect package to fill in an arbitrary value pointed at by val. // Because Unmarshal uses the reflect package, the structs // being written to must use upper case field names. If val // is nil or not a pointer, Unmarshal returns an error. // // After parsing b, any bytes that were leftover and not used to fill // val will be returned in rest. When parsing a SEQUENCE into a struct, // any trailing elements of the SEQUENCE that do not have matching // fields in val will not be included in rest, as these are considered // valid elements of the SEQUENCE and not trailing data. // // - An ASN.1 INTEGER can be written to an int, int32, int64, // or *[big.Int]. // If the encoded value does not fit in the Go type, // Unmarshal returns a parse error. // // - An ASN.1 BIT STRING can be written to a [BitString]. // // - An ASN.1 OCTET STRING can be written to a []byte. // // - An ASN.1 OBJECT IDENTIFIER can be written to an [ObjectIdentifier]. // // - An ASN.1 ENUMERATED can be written to an [Enumerated]. // // - An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a [time.Time]. // // - An ASN.1 PrintableString, IA5String, or NumericString can be written to a string. // // - Any of the above ASN.1 values can be written to an interface{}. // The value stored in the interface has the corresponding Go type. // For integers, that type is int64. // // - An ASN.1 SEQUENCE OF x or SET OF x can be written // to a slice if an x can be written to the slice's element type. // // - An ASN.1 SEQUENCE or SET can be written to a struct // if each of the elements in the sequence can be // written to the corresponding element in the struct. // // The following tags on struct fields have special meaning to Unmarshal: // // application specifies that an APPLICATION tag is used // private specifies that a PRIVATE tag is used // default:x sets the default value for optional integer fields (only used if optional is also present) // explicit specifies that an additional, explicit tag wraps the implicit one // optional marks the field as ASN.1 OPTIONAL // set causes a SET, rather than a SEQUENCE type to be expected // tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC // // When decoding an ASN.1 value with an IMPLICIT tag into a string field, // Unmarshal will default to a PrintableString, which doesn't support // characters such as '@' and '&'. To force other encodings, use the following // tags: // // ia5 causes strings to be unmarshaled as ASN.1 IA5String values // numeric causes strings to be unmarshaled as ASN.1 NumericString values // utf8 causes strings to be unmarshaled as ASN.1 UTF8String values // // If the type of the first field of a structure is RawContent then the raw // ASN1 contents of the struct will be stored in it. 
// // If the name of a slice type ends with "SET" then it's treated as if // the "set" tag was set on it. This results in interpreting the type as a // SET OF x rather than a SEQUENCE OF x. This can be used with nested slices // where a struct tag cannot be given. // // Other ASN.1 types are not supported; if it encounters them, // Unmarshal returns a parse error. func Unmarshal(b []byte, val any) (rest []byte, err error) { return UnmarshalWithParams(b, val, "") } // An invalidUnmarshalError describes an invalid argument passed to Unmarshal. // (The argument to Unmarshal must be a non-nil pointer.) type invalidUnmarshalError struct { Type reflect.Type } func (e *invalidUnmarshalError) Error() string { if e.Type == nil { return "asn1: Unmarshal recipient value is nil" } if e.Type.Kind() != reflect.Pointer { return "asn1: Unmarshal recipient value is non-pointer " + e.Type.String() } return "asn1: Unmarshal recipient value is nil " + e.Type.String() } // UnmarshalWithParams allows field parameters to be specified for the // top-level element. The form of the params is the same as the field tags. func UnmarshalWithParams(b []byte, val any, params string) (rest []byte, err error) { v := reflect.ValueOf(val) if v.Kind() != reflect.Pointer || v.IsNil() { return nil, &invalidUnmarshalError{reflect.TypeOf(val)} } offset, err := parseField(v.Elem(), b, 0, parseFieldParameters(params)) if err != nil { return nil, err } return b[offset:], nil }
go/src/encoding/asn1/asn1.go/0
{ "file_path": "go/src/encoding/asn1/asn1.go", "repo_id": "go", "token_count": 11541 }
236
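A minimal sketch of round-tripping a Go struct through asn1.Marshal and asn1.Unmarshal, including the optional/default field tag described in the Unmarshal documentation above; the toyRecord type and its values are invented for illustration:

package main

import (
	"encoding/asn1"
	"fmt"
)

// toyRecord is an invented type for this sketch; any exported struct whose
// fields map to supported ASN.1 types is handled the same way (as a SEQUENCE).
type toyRecord struct {
	ID      int
	Name    string
	Version int `asn1:"optional,default:1"`
}

func main() {
	der, err := asn1.Marshal(toyRecord{ID: 7, Name: "gopher", Version: 2})
	if err != nil {
		fmt.Println("Marshal:", err)
		return
	}

	var out toyRecord
	rest, err := asn1.Unmarshal(der, &out)
	if err != nil {
		fmt.Println("Unmarshal:", err)
		return
	}
	fmt.Printf("decoded %+v, %d trailing bytes\n", out, len(rest))
}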
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package binary // This file implements "varint" encoding of 64-bit integers. // The encoding is: // - unsigned integers are serialized 7 bits at a time, starting with the // least significant bits // - the most significant bit (msb) in each output byte indicates if there // is a continuation byte (msb = 1) // - signed integers are mapped to unsigned integers using "zig-zag" // encoding: Positive values x are written as 2*x + 0, negative values // are written as 2*(^x) + 1; that is, negative numbers are complemented // and whether to complement is encoded in bit 0. // // Design note: // At most 10 bytes are needed for 64-bit values. The encoding could // be more dense: a full 64-bit value needs an extra byte just to hold bit 63. // Instead, the msb of the previous byte could be used to hold bit 63 since we // know there can't be more than 64 bits. This is a trivial improvement and // would reduce the maximum encoding length to 9 bytes. However, it breaks the // invariant that the msb is always the "continuation bit" and thus makes the // format incompatible with a varint encoding for larger numbers (say 128-bit). import ( "errors" "io" ) // MaxVarintLenN is the maximum length of a varint-encoded N-bit integer. const ( MaxVarintLen16 = 3 MaxVarintLen32 = 5 MaxVarintLen64 = 10 ) // AppendUvarint appends the varint-encoded form of x, // as generated by [PutUvarint], to buf and returns the extended buffer. func AppendUvarint(buf []byte, x uint64) []byte { for x >= 0x80 { buf = append(buf, byte(x)|0x80) x >>= 7 } return append(buf, byte(x)) } // PutUvarint encodes a uint64 into buf and returns the number of bytes written. // If the buffer is too small, PutUvarint will panic. func PutUvarint(buf []byte, x uint64) int { i := 0 for x >= 0x80 { buf[i] = byte(x) | 0x80 x >>= 7 i++ } buf[i] = byte(x) return i + 1 } // Uvarint decodes a uint64 from buf and returns that value and the // number of bytes read (> 0). If an error occurred, the value is 0 // and the number of bytes n is <= 0 meaning: // - n == 0: buf too small; // - n < 0: value larger than 64 bits (overflow) and -n is the number of // bytes read. func Uvarint(buf []byte) (uint64, int) { var x uint64 var s uint for i, b := range buf { if i == MaxVarintLen64 { // Catch byte reads past MaxVarintLen64. // See issue https://golang.org/issues/41185 return 0, -(i + 1) // overflow } if b < 0x80 { if i == MaxVarintLen64-1 && b > 1 { return 0, -(i + 1) // overflow } return x | uint64(b)<<s, i + 1 } x |= uint64(b&0x7f) << s s += 7 } return 0, 0 } // AppendVarint appends the varint-encoded form of x, // as generated by [PutVarint], to buf and returns the extended buffer. func AppendVarint(buf []byte, x int64) []byte { ux := uint64(x) << 1 if x < 0 { ux = ^ux } return AppendUvarint(buf, ux) } // PutVarint encodes an int64 into buf and returns the number of bytes written. // If the buffer is too small, PutVarint will panic. func PutVarint(buf []byte, x int64) int { ux := uint64(x) << 1 if x < 0 { ux = ^ux } return PutUvarint(buf, ux) } // Varint decodes an int64 from buf and returns that value and the // number of bytes read (> 0). If an error occurred, the value is 0 // and the number of bytes n is <= 0 with the following meaning: // - n == 0: buf too small; // - n < 0: value larger than 64 bits (overflow) // and -n is the number of bytes read. 
func Varint(buf []byte) (int64, int) { ux, n := Uvarint(buf) // ok to continue in presence of error x := int64(ux >> 1) if ux&1 != 0 { x = ^x } return x, n } var errOverflow = errors.New("binary: varint overflows a 64-bit integer") // ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64. // The error is [io.EOF] only if no bytes were read. // If an [io.EOF] happens after reading some but not all the bytes, // ReadUvarint returns [io.ErrUnexpectedEOF]. func ReadUvarint(r io.ByteReader) (uint64, error) { var x uint64 var s uint for i := 0; i < MaxVarintLen64; i++ { b, err := r.ReadByte() if err != nil { if i > 0 && err == io.EOF { err = io.ErrUnexpectedEOF } return x, err } if b < 0x80 { if i == MaxVarintLen64-1 && b > 1 { return x, errOverflow } return x | uint64(b)<<s, nil } x |= uint64(b&0x7f) << s s += 7 } return x, errOverflow } // ReadVarint reads an encoded signed integer from r and returns it as an int64. // The error is [io.EOF] only if no bytes were read. // If an [io.EOF] happens after reading some but not all the bytes, // ReadVarint returns [io.ErrUnexpectedEOF]. func ReadVarint(r io.ByteReader) (int64, error) { ux, err := ReadUvarint(r) // ok to continue in presence of error x := int64(ux >> 1) if ux&1 != 0 { x = ^x } return x, err }
go/src/encoding/binary/varint.go/0
{ "file_path": "go/src/encoding/binary/varint.go", "repo_id": "go", "token_count": 1782 }
237
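A minimal sketch of the varint API above: PutUvarint/Uvarint with a fixed buffer, and the Append-style helpers for signed (zig-zag encoded) values:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A buffer of MaxVarintLen64 bytes always suffices for a uint64.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300) // 300 encodes as two bytes: 0xac 0x02
	fmt.Printf("encoded % x\n", buf[:n])

	v, m := binary.Uvarint(buf[:n])
	fmt.Println("decoded", v, "from", m, "bytes")

	// Append-style API for signed values, which are zig-zag encoded.
	out := binary.AppendVarint(nil, -3)
	s, _ := binary.Varint(out)
	fmt.Println("signed round trip:", s)
}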
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build ignore package main // Need to compile package gob with debug.go to build this program. // See comments in debug.go for how to do this. import ( "encoding/gob" "fmt" "os" ) func main() { var err error file := os.Stdin if len(os.Args) > 1 { file, err = os.Open(os.Args[1]) if err != nil { fmt.Fprintf(os.Stderr, "dump: %s\n", err) os.Exit(1) } defer file.Close() } gob.Debug(file) }
go/src/encoding/gob/dump.go/0
{ "file_path": "go/src/encoding/gob/dump.go", "repo_id": "go", "token_count": 225 }
238
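Since gob.Debug is only available when package gob is built with debug.go (as the comment in dump.go notes), a minimal sketch of the ordinary Encoder/Decoder round trip that produces the kind of stream such a dump inspects; the point type is invented for illustration:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// point is an invented type for this sketch; gob requires exported fields.
type point struct{ X, Y int }

func main() {
	var buf bytes.Buffer

	// The first Encode of a point also writes its type description
	// into the stream ahead of the value.
	if err := gob.NewEncoder(&buf).Encode(point{X: 1, Y: 2}); err != nil {
		log.Fatal("encode:", err)
	}

	// Decode the value back from the same byte stream.
	var p point
	if err := gob.NewDecoder(&buf).Decode(&p); err != nil {
		log.Fatal("decode:", err)
	}
	fmt.Println(p) // {1 2}
}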
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package hex import ( "bytes" "fmt" "io" "strings" "testing" ) type encDecTest struct { enc string dec []byte } var encDecTests = []encDecTest{ {"", []byte{}}, {"0001020304050607", []byte{0, 1, 2, 3, 4, 5, 6, 7}}, {"08090a0b0c0d0e0f", []byte{8, 9, 10, 11, 12, 13, 14, 15}}, {"f0f1f2f3f4f5f6f7", []byte{0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7}}, {"f8f9fafbfcfdfeff", []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}}, {"67", []byte{'g'}}, {"e3a1", []byte{0xe3, 0xa1}}, } func TestEncode(t *testing.T) { for i, test := range encDecTests { dst := make([]byte, EncodedLen(len(test.dec))) n := Encode(dst, test.dec) if n != len(dst) { t.Errorf("#%d: bad return value: got: %d want: %d", i, n, len(dst)) } if string(dst) != test.enc { t.Errorf("#%d: got: %#v want: %#v", i, dst, test.enc) } dst = []byte("lead") dst = AppendEncode(dst, test.dec) if string(dst) != "lead"+test.enc { t.Errorf("#%d: got: %#v want: %#v", i, dst, "lead"+test.enc) } } } func TestDecode(t *testing.T) { // Case for decoding uppercase hex characters, since // Encode always uses lowercase. decTests := append(encDecTests, encDecTest{"F8F9FAFBFCFDFEFF", []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}}) for i, test := range decTests { dst := make([]byte, DecodedLen(len(test.enc))) n, err := Decode(dst, []byte(test.enc)) if err != nil { t.Errorf("#%d: bad return value: got:%d want:%d", i, n, len(dst)) } else if !bytes.Equal(dst, test.dec) { t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec) } dst = []byte("lead") dst, err = AppendDecode(dst, []byte(test.enc)) if err != nil { t.Errorf("#%d: AppendDecode error: %v", i, err) } else if string(dst) != "lead"+string(test.dec) { t.Errorf("#%d: got: %#v want: %#v", i, dst, "lead"+string(test.dec)) } } } func TestEncodeToString(t *testing.T) { for i, test := range encDecTests { s := EncodeToString(test.dec) if s != test.enc { t.Errorf("#%d got:%s want:%s", i, s, test.enc) } } } func TestDecodeString(t *testing.T) { for i, test := range encDecTests { dst, err := DecodeString(test.enc) if err != nil { t.Errorf("#%d: unexpected err value: %s", i, err) continue } if !bytes.Equal(dst, test.dec) { t.Errorf("#%d: got: %#v want: #%v", i, dst, test.dec) } } } var errTests = []struct { in string out string err error }{ {"", "", nil}, {"0", "", ErrLength}, {"zd4aa", "", InvalidByteError('z')}, {"d4aaz", "\xd4\xaa", InvalidByteError('z')}, {"30313", "01", ErrLength}, {"0g", "", InvalidByteError('g')}, {"00gg", "\x00", InvalidByteError('g')}, {"0\x01", "", InvalidByteError('\x01')}, {"ffeed", "\xff\xee", ErrLength}, } func TestDecodeErr(t *testing.T) { for _, tt := range errTests { out := make([]byte, len(tt.in)+10) n, err := Decode(out, []byte(tt.in)) if string(out[:n]) != tt.out || err != tt.err { t.Errorf("Decode(%q) = %q, %v, want %q, %v", tt.in, string(out[:n]), err, tt.out, tt.err) } } } func TestDecodeStringErr(t *testing.T) { for _, tt := range errTests { out, err := DecodeString(tt.in) if string(out) != tt.out || err != tt.err { t.Errorf("DecodeString(%q) = %q, %v, want %q, %v", tt.in, out, err, tt.out, tt.err) } } } func TestEncoderDecoder(t *testing.T) { for _, multiplier := range []int{1, 128, 192} { for _, test := range encDecTests { input := bytes.Repeat(test.dec, multiplier) output := strings.Repeat(test.enc, multiplier) var buf bytes.Buffer enc := NewEncoder(&buf) r := struct{ io.Reader }{bytes.NewReader(input)} // 
io.Reader only; not io.WriterTo if n, err := io.CopyBuffer(enc, r, make([]byte, 7)); n != int64(len(input)) || err != nil { t.Errorf("encoder.Write(%q*%d) = (%d, %v), want (%d, nil)", test.dec, multiplier, n, err, len(input)) continue } if encDst := buf.String(); encDst != output { t.Errorf("buf(%q*%d) = %v, want %v", test.dec, multiplier, encDst, output) continue } dec := NewDecoder(&buf) var decBuf bytes.Buffer w := struct{ io.Writer }{&decBuf} // io.Writer only; not io.ReaderFrom if _, err := io.CopyBuffer(w, dec, make([]byte, 7)); err != nil || decBuf.Len() != len(input) { t.Errorf("decoder.Read(%q*%d) = (%d, %v), want (%d, nil)", test.enc, multiplier, decBuf.Len(), err, len(input)) } if !bytes.Equal(decBuf.Bytes(), input) { t.Errorf("decBuf(%q*%d) = %v, want %v", test.dec, multiplier, decBuf.Bytes(), input) continue } } } } func TestDecoderErr(t *testing.T) { for _, tt := range errTests { dec := NewDecoder(strings.NewReader(tt.in)) out, err := io.ReadAll(dec) wantErr := tt.err // Decoder is reading from stream, so it reports io.ErrUnexpectedEOF instead of ErrLength. if wantErr == ErrLength { wantErr = io.ErrUnexpectedEOF } if string(out) != tt.out || err != wantErr { t.Errorf("NewDecoder(%q) = %q, %v, want %q, %v", tt.in, out, err, tt.out, wantErr) } } } func TestDumper(t *testing.T) { var in [40]byte for i := range in { in[i] = byte(i + 30) } for stride := 1; stride < len(in); stride++ { var out bytes.Buffer dumper := Dumper(&out) done := 0 for done < len(in) { todo := done + stride if todo > len(in) { todo = len(in) } dumper.Write(in[done:todo]) done = todo } dumper.Close() if !bytes.Equal(out.Bytes(), expectedHexDump) { t.Errorf("stride: %d failed. got:\n%s\nwant:\n%s", stride, out.Bytes(), expectedHexDump) } } } func TestDumper_doubleclose(t *testing.T) { var out strings.Builder dumper := Dumper(&out) dumper.Write([]byte(`gopher`)) dumper.Close() dumper.Close() dumper.Write([]byte(`gopher`)) dumper.Close() expected := "00000000 67 6f 70 68 65 72 |gopher|\n" if out.String() != expected { t.Fatalf("got:\n%#v\nwant:\n%#v", out.String(), expected) } } func TestDumper_earlyclose(t *testing.T) { var out strings.Builder dumper := Dumper(&out) dumper.Close() dumper.Write([]byte(`gopher`)) expected := "" if out.String() != expected { t.Fatalf("got:\n%#v\nwant:\n%#v", out.String(), expected) } } func TestDump(t *testing.T) { var in [40]byte for i := range in { in[i] = byte(i + 30) } out := []byte(Dump(in[:])) if !bytes.Equal(out, expectedHexDump) { t.Errorf("got:\n%s\nwant:\n%s", out, expectedHexDump) } } var expectedHexDump = []byte(`00000000 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d |.. 
!"#$%&'()*+,-| 00000010 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d |./0123456789:;<=| 00000020 3e 3f 40 41 42 43 44 45 |>?@ABCDE| `) var sink []byte func BenchmarkEncode(b *testing.B) { for _, size := range []int{256, 1024, 4096, 16384} { src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8) sink = make([]byte, 2*size) b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { Encode(sink, src) } }) } } func BenchmarkDecode(b *testing.B) { for _, size := range []int{256, 1024, 4096, 16384} { src := bytes.Repeat([]byte{'2', 'b', '7', '4', '4', 'f', 'a', 'a'}, size/8) sink = make([]byte, size/2) b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { Decode(sink, src) } }) } } func BenchmarkDecodeString(b *testing.B) { for _, size := range []int{256, 1024, 4096, 16384} { src := strings.Repeat("2b744faa", size/8) b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { sink, _ = DecodeString(src) } }) } } func BenchmarkDump(b *testing.B) { for _, size := range []int{256, 1024, 4096, 16384} { src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8) b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { Dump(src) } }) } }
go/src/encoding/hex/hex_test.go/0
{ "file_path": "go/src/encoding/hex/hex_test.go", "repo_id": "go", "token_count": 3818 }
239
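A minimal sketch of the package-level hex helpers exercised by the tests above — EncodeToString, DecodeString, and Dumper:

package main

import (
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	src := []byte("gopher")

	// String helpers.
	enc := hex.EncodeToString(src)
	fmt.Println(enc) // 676f70686572

	dec, err := hex.DecodeString(enc)
	if err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(string(dec)) // gopher

	// Dumper writes a `hexdump -C`-style listing; Close flushes the
	// final partial line.
	d := hex.Dumper(os.Stdout)
	d.Write(src)
	d.Close()
}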
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "bytes" "errors" "io" ) // A Decoder reads and decodes JSON values from an input stream. type Decoder struct { r io.Reader buf []byte d decodeState scanp int // start of unread data in buf scanned int64 // amount of data already scanned scan scanner err error tokenState int tokenStack []int } // NewDecoder returns a new decoder that reads from r. // // The decoder introduces its own buffering and may // read data from r beyond the JSON values requested. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } // UseNumber causes the Decoder to unmarshal a number into an // interface value as a [Number] instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination // is a struct and the input contains object keys which do not match any // non-ignored, exported fields in the destination. func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true } // Decode reads the next JSON-encoded value from its // input and stores it in the value pointed to by v. // // See the documentation for [Unmarshal] for details about // the conversion of JSON into a Go value. func (dec *Decoder) Decode(v any) error { if dec.err != nil { return dec.err } if err := dec.tokenPrepareForDecode(); err != nil { return err } if !dec.tokenValueAllowed() { return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()} } // Read whole value into buffer. n, err := dec.readValue() if err != nil { return err } dec.d.init(dec.buf[dec.scanp : dec.scanp+n]) dec.scanp += n // Don't save err from unmarshal into dec.err: // the connection is still usable since we read a complete JSON // object from it before the error happened. err = dec.d.unmarshal(v) // fixup token streaming state dec.tokenValueEnd() return err } // Buffered returns a reader of the data remaining in the Decoder's // buffer. The reader is valid until the next call to [Decoder.Decode]. func (dec *Decoder) Buffered() io.Reader { return bytes.NewReader(dec.buf[dec.scanp:]) } // readValue reads a JSON value into dec.buf. // It returns the length of the encoding. func (dec *Decoder) readValue() (int, error) { dec.scan.reset() scanp := dec.scanp var err error Input: // help the compiler see that scanp is never negative, so it can remove // some bounds checks below. for scanp >= 0 { // Look in the buffer for a new value. for ; scanp < len(dec.buf); scanp++ { c := dec.buf[scanp] dec.scan.bytes++ switch dec.scan.step(&dec.scan, c) { case scanEnd: // scanEnd is delayed one byte so we decrement // the scanner bytes count by 1 to ensure that // this value is correct in the next call of Decode. dec.scan.bytes-- break Input case scanEndObject, scanEndArray: // scanEnd is delayed one byte. // We might block trying to get that byte from src, // so instead invent a space byte. if stateEndValue(&dec.scan, ' ') == scanEnd { scanp++ break Input } case scanError: dec.err = dec.scan.err return 0, dec.scan.err } } // Did the last read have an error? // Delayed until now to allow buffer scan. 
if err != nil { if err == io.EOF { if dec.scan.step(&dec.scan, ' ') == scanEnd { break Input } if nonSpace(dec.buf) { err = io.ErrUnexpectedEOF } } dec.err = err return 0, err } n := scanp - dec.scanp err = dec.refill() scanp = dec.scanp + n } return scanp - dec.scanp, nil } func (dec *Decoder) refill() error { // Make room to read more into the buffer. // First slide down data already consumed. if dec.scanp > 0 { dec.scanned += int64(dec.scanp) n := copy(dec.buf, dec.buf[dec.scanp:]) dec.buf = dec.buf[:n] dec.scanp = 0 } // Grow buffer if not large enough. const minRead = 512 if cap(dec.buf)-len(dec.buf) < minRead { newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead) copy(newBuf, dec.buf) dec.buf = newBuf } // Read. Delay error for next iteration (after scan). n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) dec.buf = dec.buf[0 : len(dec.buf)+n] return err } func nonSpace(b []byte) bool { for _, c := range b { if !isSpace(c) { return true } } return false } // An Encoder writes JSON values to an output stream. type Encoder struct { w io.Writer err error escapeHTML bool indentBuf []byte indentPrefix string indentValue string } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w: w, escapeHTML: true} } // Encode writes the JSON encoding of v to the stream, // with insignificant space characters elided, // followed by a newline character. // // See the documentation for [Marshal] for details about the // conversion of Go values to JSON. func (enc *Encoder) Encode(v any) error { if enc.err != nil { return enc.err } e := newEncodeState() defer encodeStatePool.Put(e) err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML}) if err != nil { return err } // Terminate each value with a newline. // This makes the output look a little nicer // when debugging, and some kind of space // is required if the encoded value was a number, // so that the reader knows there aren't more // digits coming. e.WriteByte('\n') b := e.Bytes() if enc.indentPrefix != "" || enc.indentValue != "" { enc.indentBuf, err = appendIndent(enc.indentBuf[:0], b, enc.indentPrefix, enc.indentValue) if err != nil { return err } b = enc.indentBuf } if _, err = enc.w.Write(b); err != nil { enc.err = err } return err } // SetIndent instructs the encoder to format each subsequent encoded // value as if indented by the package-level function Indent(dst, src, prefix, indent). // Calling SetIndent("", "") disables indentation. func (enc *Encoder) SetIndent(prefix, indent string) { enc.indentPrefix = prefix enc.indentValue = indent } // SetEscapeHTML specifies whether problematic HTML characters // should be escaped inside JSON quoted strings. // The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e // to avoid certain safety problems that can arise when embedding JSON in HTML. // // In non-HTML settings where the escaping interferes with the readability // of the output, SetEscapeHTML(false) disables this behavior. func (enc *Encoder) SetEscapeHTML(on bool) { enc.escapeHTML = on } // RawMessage is a raw encoded JSON value. // It implements [Marshaler] and [Unmarshaler] and can // be used to delay JSON decoding or precompute a JSON encoding. type RawMessage []byte // MarshalJSON returns m as the JSON encoding of m. func (m RawMessage) MarshalJSON() ([]byte, error) { if m == nil { return []byte("null"), nil } return m, nil } // UnmarshalJSON sets *m to a copy of data. 
func (m *RawMessage) UnmarshalJSON(data []byte) error { if m == nil { return errors.New("json.RawMessage: UnmarshalJSON on nil pointer") } *m = append((*m)[0:0], data...) return nil } var _ Marshaler = (*RawMessage)(nil) var _ Unmarshaler = (*RawMessage)(nil) // A Token holds a value of one of these types: // // - [Delim], for the four JSON delimiters [ ] { } // - bool, for JSON booleans // - float64, for JSON numbers // - [Number], for JSON numbers // - string, for JSON string literals // - nil, for JSON null type Token any const ( tokenTopValue = iota tokenArrayStart tokenArrayValue tokenArrayComma tokenObjectStart tokenObjectKey tokenObjectColon tokenObjectValue tokenObjectComma ) // advance tokenstate from a separator state to a value state func (dec *Decoder) tokenPrepareForDecode() error { // Note: Not calling peek before switch, to avoid // putting peek into the standard Decode path. // peek is only called when using the Token API. switch dec.tokenState { case tokenArrayComma: c, err := dec.peek() if err != nil { return err } if c != ',' { return &SyntaxError{"expected comma after array element", dec.InputOffset()} } dec.scanp++ dec.tokenState = tokenArrayValue case tokenObjectColon: c, err := dec.peek() if err != nil { return err } if c != ':' { return &SyntaxError{"expected colon after object key", dec.InputOffset()} } dec.scanp++ dec.tokenState = tokenObjectValue } return nil } func (dec *Decoder) tokenValueAllowed() bool { switch dec.tokenState { case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue: return true } return false } func (dec *Decoder) tokenValueEnd() { switch dec.tokenState { case tokenArrayStart, tokenArrayValue: dec.tokenState = tokenArrayComma case tokenObjectValue: dec.tokenState = tokenObjectComma } } // A Delim is a JSON array or object delimiter, one of [ ] { or }. type Delim rune func (d Delim) String() string { return string(d) } // Token returns the next JSON token in the input stream. // At the end of the input stream, Token returns nil, [io.EOF]. // // Token guarantees that the delimiters [ ] { } it returns are // properly nested and matched: if Token encounters an unexpected // delimiter in the input, it will return an error. // // The input stream consists of basic JSON values—bool, string, // number, and null—along with delimiters [ ] { } of type [Delim] // to mark the start and end of arrays and objects. // Commas and colons are elided. 
func (dec *Decoder) Token() (Token, error) { for { c, err := dec.peek() if err != nil { return nil, err } switch c { case '[': if !dec.tokenValueAllowed() { return dec.tokenError(c) } dec.scanp++ dec.tokenStack = append(dec.tokenStack, dec.tokenState) dec.tokenState = tokenArrayStart return Delim('['), nil case ']': if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma { return dec.tokenError(c) } dec.scanp++ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1] dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1] dec.tokenValueEnd() return Delim(']'), nil case '{': if !dec.tokenValueAllowed() { return dec.tokenError(c) } dec.scanp++ dec.tokenStack = append(dec.tokenStack, dec.tokenState) dec.tokenState = tokenObjectStart return Delim('{'), nil case '}': if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma { return dec.tokenError(c) } dec.scanp++ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1] dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1] dec.tokenValueEnd() return Delim('}'), nil case ':': if dec.tokenState != tokenObjectColon { return dec.tokenError(c) } dec.scanp++ dec.tokenState = tokenObjectValue continue case ',': if dec.tokenState == tokenArrayComma { dec.scanp++ dec.tokenState = tokenArrayValue continue } if dec.tokenState == tokenObjectComma { dec.scanp++ dec.tokenState = tokenObjectKey continue } return dec.tokenError(c) case '"': if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey { var x string old := dec.tokenState dec.tokenState = tokenTopValue err := dec.Decode(&x) dec.tokenState = old if err != nil { return nil, err } dec.tokenState = tokenObjectColon return x, nil } fallthrough default: if !dec.tokenValueAllowed() { return dec.tokenError(c) } var x any if err := dec.Decode(&x); err != nil { return nil, err } return x, nil } } } func (dec *Decoder) tokenError(c byte) (Token, error) { var context string switch dec.tokenState { case tokenTopValue: context = " looking for beginning of value" case tokenArrayStart, tokenArrayValue, tokenObjectValue: context = " looking for beginning of value" case tokenArrayComma: context = " after array element" case tokenObjectKey: context = " looking for beginning of object key string" case tokenObjectColon: context = " after object key" case tokenObjectComma: context = " after object key:value pair" } return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()} } // More reports whether there is another element in the // current array or object being parsed. func (dec *Decoder) More() bool { c, err := dec.peek() return err == nil && c != ']' && c != '}' } func (dec *Decoder) peek() (byte, error) { var err error for { for i := dec.scanp; i < len(dec.buf); i++ { c := dec.buf[i] if isSpace(c) { continue } dec.scanp = i return c, nil } // buffer has been scanned, now report any error if err != nil { return 0, err } err = dec.refill() } } // InputOffset returns the input stream byte offset of the current decoder position. // The offset gives the location of the end of the most recently returned token // and the beginning of the next token. func (dec *Decoder) InputOffset() int64 { return dec.scanned + int64(dec.scanp) }
go/src/encoding/json/stream.go/0
{ "file_path": "go/src/encoding/json/stream.go", "repo_id": "go", "token_count": 4741 }
240
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package xml import ( "bytes" "encoding" "errors" "fmt" "reflect" "runtime" "strconv" "strings" ) // BUG(rsc): Mapping between XML elements and data structures is inherently flawed: // an XML element is an order-dependent collection of anonymous // values, while a data structure is an order-independent collection // of named values. // See [encoding/json] for a textual representation more suitable // to data structures. // Unmarshal parses the XML-encoded data and stores the result in // the value pointed to by v, which must be an arbitrary struct, // slice, or string. Well-formed data that does not fit into v is // discarded. // // Because Unmarshal uses the reflect package, it can only assign // to exported (upper case) fields. Unmarshal uses a case-sensitive // comparison to match XML element names to tag values and struct // field names. // // Unmarshal maps an XML element to a struct using the following rules. // In the rules, the tag of a field refers to the value associated with the // key 'xml' in the struct field's tag (see the example above). // // - If the struct has a field of type []byte or string with tag // ",innerxml", Unmarshal accumulates the raw XML nested inside the // element in that field. The rest of the rules still apply. // // - If the struct has a field named XMLName of type Name, // Unmarshal records the element name in that field. // // - If the XMLName field has an associated tag of the form // "name" or "namespace-URL name", the XML element must have // the given name (and, optionally, name space) or else Unmarshal // returns an error. // // - If the XML element has an attribute whose name matches a // struct field name with an associated tag containing ",attr" or // the explicit name in a struct field tag of the form "name,attr", // Unmarshal records the attribute value in that field. // // - If the XML element has an attribute not handled by the previous // rule and the struct has a field with an associated tag containing // ",any,attr", Unmarshal records the attribute value in the first // such field. // // - If the XML element contains character data, that data is // accumulated in the first struct field that has tag ",chardata". // The struct field may have type []byte or string. // If there is no such field, the character data is discarded. // // - If the XML element contains comments, they are accumulated in // the first struct field that has tag ",comment". The struct // field may have type []byte or string. If there is no such // field, the comments are discarded. // // - If the XML element contains a sub-element whose name matches // the prefix of a tag formatted as "a" or "a>b>c", unmarshal // will descend into the XML structure looking for elements with the // given names, and will map the innermost elements to that struct // field. A tag starting with ">" is equivalent to one starting // with the field name followed by ">". // // - If the XML element contains a sub-element whose name matches // a struct field's XMLName tag and the struct field has no // explicit name tag as per the previous rule, unmarshal maps // the sub-element to that struct field. // // - If the XML element contains a sub-element whose name matches a // field without any mode flags (",attr", ",chardata", etc), Unmarshal // maps the sub-element to that struct field. 
// // - If the XML element contains a sub-element that hasn't matched any // of the above rules and the struct has a field with tag ",any", // unmarshal maps the sub-element to that struct field. // // - An anonymous struct field is handled as if the fields of its // value were part of the outer struct. // // - A struct field with tag "-" is never unmarshaled into. // // If Unmarshal encounters a field type that implements the Unmarshaler // interface, Unmarshal calls its UnmarshalXML method to produce the value from // the XML element. Otherwise, if the value implements // [encoding.TextUnmarshaler], Unmarshal calls that value's UnmarshalText method. // // Unmarshal maps an XML element to a string or []byte by saving the // concatenation of that element's character data in the string or // []byte. The saved []byte is never nil. // // Unmarshal maps an attribute value to a string or []byte by saving // the value in the string or slice. // // Unmarshal maps an attribute value to an [Attr] by saving the attribute, // including its name, in the Attr. // // Unmarshal maps an XML element or attribute value to a slice by // extending the length of the slice and mapping the element or attribute // to the newly created value. // // Unmarshal maps an XML element or attribute value to a bool by // setting it to the boolean value represented by the string. Whitespace // is trimmed and ignored. // // Unmarshal maps an XML element or attribute value to an integer or // floating-point field by setting the field to the result of // interpreting the string value in decimal. There is no check for // overflow. Whitespace is trimmed and ignored. // // Unmarshal maps an XML element to a Name by recording the element // name. // // Unmarshal maps an XML element to a pointer by setting the pointer // to a freshly allocated value and then mapping the element to that value. // // A missing element or empty attribute value will be unmarshaled as a zero value. // If the field is a slice, a zero value will be appended to the field. Otherwise, the // field will be set to its zero value. func Unmarshal(data []byte, v any) error { return NewDecoder(bytes.NewReader(data)).Decode(v) } // Decode works like [Unmarshal], except it reads the decoder // stream to find the start element. func (d *Decoder) Decode(v any) error { return d.DecodeElement(v, nil) } // DecodeElement works like [Unmarshal] except that it takes // a pointer to the start XML element to decode into v. // It is useful when a client reads some raw XML tokens itself // but also wants to defer to [Unmarshal] for some elements. func (d *Decoder) DecodeElement(v any, start *StartElement) error { val := reflect.ValueOf(v) if val.Kind() != reflect.Pointer { return errors.New("non-pointer passed to Unmarshal") } if val.IsNil() { return errors.New("nil pointer passed to Unmarshal") } return d.unmarshal(val.Elem(), start, 0) } // An UnmarshalError represents an error in the unmarshaling process. type UnmarshalError string func (e UnmarshalError) Error() string { return string(e) } // Unmarshaler is the interface implemented by objects that can unmarshal // an XML element description of themselves. // // UnmarshalXML decodes a single XML element // beginning with the given start element. // If it returns an error, the outer call to Unmarshal stops and // returns that error. // UnmarshalXML must consume exactly one XML element. 
// One common implementation strategy is to unmarshal into // a separate value with a layout matching the expected XML // using d.DecodeElement, and then to copy the data from // that value into the receiver. // Another common strategy is to use d.Token to process the // XML object one token at a time. // UnmarshalXML may not use d.RawToken. type Unmarshaler interface { UnmarshalXML(d *Decoder, start StartElement) error } // UnmarshalerAttr is the interface implemented by objects that can unmarshal // an XML attribute description of themselves. // // UnmarshalXMLAttr decodes a single XML attribute. // If it returns an error, the outer call to [Unmarshal] stops and // returns that error. // UnmarshalXMLAttr is used only for struct fields with the // "attr" option in the field tag. type UnmarshalerAttr interface { UnmarshalXMLAttr(attr Attr) error } // receiverType returns the receiver type to use in an expression like "%s.MethodName". func receiverType(val any) string { t := reflect.TypeOf(val) if t.Name() != "" { return t.String() } return "(" + t.String() + ")" } // unmarshalInterface unmarshals a single XML element into val. // start is the opening tag of the element. func (d *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { // Record that decoder must stop at end tag corresponding to start. d.pushEOF() d.unmarshalDepth++ err := val.UnmarshalXML(d, *start) d.unmarshalDepth-- if err != nil { d.popEOF() return err } if !d.popEOF() { return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) } return nil } // unmarshalTextInterface unmarshals a single XML element into val. // The chardata contained in the element (but not its children) // is passed to the text unmarshaler. func (d *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler) error { var buf []byte depth := 1 for depth > 0 { t, err := d.Token() if err != nil { return err } switch t := t.(type) { case CharData: if depth == 1 { buf = append(buf, t...) } case StartElement: depth++ case EndElement: depth-- } } return val.UnmarshalText(buf) } // unmarshalAttr unmarshals a single XML attribute into val. func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { if val.Kind() == reflect.Pointer { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) } } // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) } } if val.Type().Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { // Slice of element values. // Grow slice. n := val.Len() val.Grow(1) val.SetLen(n + 1) // Recur to read element into slice. 
if err := d.unmarshalAttr(val.Index(n), attr); err != nil { val.SetLen(n) return err } return nil } if val.Type() == attrType { val.Set(reflect.ValueOf(attr)) return nil } return copyValue(val, []byte(attr.Value)) } var ( attrType = reflect.TypeFor[Attr]() unmarshalerType = reflect.TypeFor[Unmarshaler]() unmarshalerAttrType = reflect.TypeFor[UnmarshalerAttr]() textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() ) const ( maxUnmarshalDepth = 10000 maxUnmarshalDepthWasm = 5000 // go.dev/issue/56498 ) var errUnmarshalDepth = errors.New("exceeded max depth") // Unmarshal a single XML element into val. func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error { if depth >= maxUnmarshalDepth || runtime.GOARCH == "wasm" && depth >= maxUnmarshalDepthWasm { return errUnmarshalDepth } // Find start element if we need it. if start == nil { for { tok, err := d.Token() if err != nil { return err } if t, ok := tok.(StartElement); ok { start = &t break } } } // Load value from interface, but only if the result will be // usefully addressable. if val.Kind() == reflect.Interface && !val.IsNil() { e := val.Elem() if e.Kind() == reflect.Pointer && !e.IsNil() { val = e } } if val.Kind() == reflect.Pointer { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } if val.CanInterface() && val.Type().Implements(unmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. return d.unmarshalInterface(val.Interface().(Unmarshaler), start) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) } } if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) } } var ( data []byte saveData reflect.Value comment []byte saveComment reflect.Value saveXML reflect.Value saveXMLIndex int saveXMLData []byte saveAny reflect.Value sv reflect.Value tinfo *typeInfo err error ) switch v := val; v.Kind() { default: return errors.New("unknown type " + v.Type().String()) case reflect.Interface: // TODO: For now, simply ignore the field. In the near // future we may choose to unmarshal the start // element on it, if not nil. return d.Skip() case reflect.Slice: typ := v.Type() if typ.Elem().Kind() == reflect.Uint8 { // []byte saveData = v break } // Slice of element values. // Grow slice. n := v.Len() v.Grow(1) v.SetLen(n + 1) // Recur to read element into slice. if err := d.unmarshal(v.Index(n), start, depth+1); err != nil { v.SetLen(n) return err } return nil case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: saveData = v case reflect.Struct: typ := v.Type() if typ == nameType { v.Set(reflect.ValueOf(start.Name)) break } sv = v tinfo, err = getTypeInfo(typ) if err != nil { return err } // Validate and assign element name. 
if tinfo.xmlname != nil { finfo := tinfo.xmlname if finfo.name != "" && finfo.name != start.Name.Local { return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") } if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " if start.Name.Space == "" { e += "no name space" } else { e += start.Name.Space } return UnmarshalError(e) } fv := finfo.value(sv, initNilPointers) if _, ok := fv.Interface().(Name); ok { fv.Set(reflect.ValueOf(start.Name)) } } // Assign attributes. for _, a := range start.Attr { handled := false any := -1 for i := range tinfo.fields { finfo := &tinfo.fields[i] switch finfo.flags & fMode { case fAttr: strv := finfo.value(sv, initNilPointers) if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { if err := d.unmarshalAttr(strv, a); err != nil { return err } handled = true } case fAny | fAttr: if any == -1 { any = i } } } if !handled && any >= 0 { finfo := &tinfo.fields[any] strv := finfo.value(sv, initNilPointers) if err := d.unmarshalAttr(strv, a); err != nil { return err } } } // Determine whether we need to save character data or comments. for i := range tinfo.fields { finfo := &tinfo.fields[i] switch finfo.flags & fMode { case fCDATA, fCharData: if !saveData.IsValid() { saveData = finfo.value(sv, initNilPointers) } case fComment: if !saveComment.IsValid() { saveComment = finfo.value(sv, initNilPointers) } case fAny, fAny | fElement: if !saveAny.IsValid() { saveAny = finfo.value(sv, initNilPointers) } case fInnerXML: if !saveXML.IsValid() { saveXML = finfo.value(sv, initNilPointers) if d.saved == nil { saveXMLIndex = 0 d.saved = new(bytes.Buffer) } else { saveXMLIndex = d.savedOffset() } } } } } // Find end element. // Process sub-elements along the way. Loop: for { var savedOffset int if saveXML.IsValid() { savedOffset = d.savedOffset() } tok, err := d.Token() if err != nil { return err } switch t := tok.(type) { case StartElement: consumed := false if sv.IsValid() { // unmarshalPath can call unmarshal, so we need to pass the depth through so that // we can continue to enforce the maximum recursion limit. consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true if err := d.unmarshal(saveAny, &t, depth+1); err != nil { return err } } } if !consumed { if err := d.Skip(); err != nil { return err } } case EndElement: if saveXML.IsValid() { saveXMLData = d.saved.Bytes()[saveXMLIndex:savedOffset] if saveXMLIndex == 0 { d.saved = nil } } break Loop case CharData: if saveData.IsValid() { data = append(data, t...) } case Comment: if saveComment.IsValid() { comment = append(comment, t...) 
} } } if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return err } saveData = reflect.Value{} } if saveData.IsValid() && saveData.CanAddr() { pv := saveData.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return err } saveData = reflect.Value{} } } if err := copyValue(saveData, data); err != nil { return err } switch t := saveComment; t.Kind() { case reflect.String: t.SetString(string(comment)) case reflect.Slice: t.Set(reflect.ValueOf(comment)) } switch t := saveXML; t.Kind() { case reflect.String: t.SetString(string(saveXMLData)) case reflect.Slice: if t.Type().Elem().Kind() == reflect.Uint8 { t.Set(reflect.ValueOf(saveXMLData)) } } return nil } func copyValue(dst reflect.Value, src []byte) (err error) { dst0 := dst if dst.Kind() == reflect.Pointer { if dst.IsNil() { dst.Set(reflect.New(dst.Type().Elem())) } dst = dst.Elem() } // Save accumulated data. switch dst.Kind() { case reflect.Invalid: // Probably a comment. default: return errors.New("cannot unmarshal into " + dst0.Type().String()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if len(src) == 0 { dst.SetInt(0) return nil } itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } dst.SetInt(itmp) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: if len(src) == 0 { dst.SetUint(0) return nil } utmp, err := strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } dst.SetUint(utmp) case reflect.Float32, reflect.Float64: if len(src) == 0 { dst.SetFloat(0) return nil } ftmp, err := strconv.ParseFloat(strings.TrimSpace(string(src)), dst.Type().Bits()) if err != nil { return err } dst.SetFloat(ftmp) case reflect.Bool: if len(src) == 0 { dst.SetBool(false) return nil } value, err := strconv.ParseBool(strings.TrimSpace(string(src))) if err != nil { return err } dst.SetBool(value) case reflect.String: dst.SetString(string(src)) case reflect.Slice: if len(src) == 0 { // non-nil to flag presence src = []byte{} } dst.SetBytes(src) } return nil } // unmarshalPath walks down an XML structure looking for wanted // paths, and calls unmarshal on them. // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is uninteresting for sv's fields. func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { finfo := &tinfo.fields[i] if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { continue } for j := range parents { if parents[j] != finfo.parents[j] { continue Loop } } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. Break and recurse // since it's not ok for one field path to be itself // the prefix for another field path. 
recurse = true // We can reuse the same slice as long as we // don't try to append to it. parents = finfo.parents[:len(parents)+1] break } } if !recurse { // We have no business with this element. return false, nil } // The element is not a perfect match for any field, but one // or more fields have the path to this element as a parent // prefix. Recurse and attempt to match these. for { var tok Token tok, err = d.Token() if err != nil { return true, err } switch t := tok.(type) { case StartElement: // the recursion depth of unmarshalPath is limited to the path length specified // by the struct field tag, so we don't increment the depth here. consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth) if err != nil { return true, err } if !consumed2 { if err := d.Skip(); err != nil { return true, err } } case EndElement: return true, nil } } } // Skip reads tokens until it has consumed the end element // matching the most recent start element already consumed, // skipping nested structures. // It returns nil if it finds an end element matching the start // element; otherwise it returns an error describing the problem. func (d *Decoder) Skip() error { var depth int64 for { tok, err := d.Token() if err != nil { return err } switch tok.(type) { case StartElement: depth++ case EndElement: if depth == 0 { return nil } depth-- } } }
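// Usage sketch (illustrative only, not part of read.go): a struct applying
// several of the field-tag rules documented above ("name", ",attr",
// ",comment"). The XML input, type name, and field names are assumptions
// chosen for demonstration; a real caller would import "encoding/xml" and
// "fmt".
//
//	type Address struct {
//		XMLName xml.Name `xml:"address"`
//		City    string   `xml:"city"`
//		Zip     string   `xml:"zip,attr"`
//		Note    string   `xml:",comment"`
//	}
//
//	func exampleUnmarshal() error {
//		data := []byte(`<address zip="10001"><city>New York</city><!--home--></address>`)
//		var a Address
//		if err := xml.Unmarshal(data, &a); err != nil {
//			return err
//		}
//		fmt.Println(a.City, a.Zip, a.Note) // New York 10001 home
//		return nil
//	}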
go/src/encoding/xml/read.go/0
{ "file_path": "go/src/encoding/xml/read.go", "repo_id": "go", "token_count": 8459 }
241
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // These examples demonstrate more intricate uses of the flag package. package flag_test import ( "errors" "flag" "fmt" "strings" "time" ) // Example 1: A single string flag called "species" with default value "gopher". var species = flag.String("species", "gopher", "the species we are studying") // Example 2: Two flags sharing a variable, so we can have a shorthand. // The order of initialization is undefined, so make sure both use the // same default value. They must be set up with an init function. var gopherType string func init() { const ( defaultGopher = "pocket" usage = "the variety of gopher" ) flag.StringVar(&gopherType, "gopher_type", defaultGopher, usage) flag.StringVar(&gopherType, "g", defaultGopher, usage+" (shorthand)") } // Example 3: A user-defined flag type, a slice of durations. type interval []time.Duration // String is the method to format the flag's value, part of the flag.Value interface. // The String method's output will be used in diagnostics. func (i *interval) String() string { return fmt.Sprint(*i) } // Set is the method to set the flag value, part of the flag.Value interface. // Set's argument is a string to be parsed to set the flag. // It's a comma-separated list, so we split it. func (i *interval) Set(value string) error { // If we wanted to allow the flag to be set multiple times, // accumulating values, we would delete this if statement. // That would permit usages such as // -deltaT 10s -deltaT 15s // and other combinations. if len(*i) > 0 { return errors.New("interval flag already set") } for _, dt := range strings.Split(value, ",") { duration, err := time.ParseDuration(dt) if err != nil { return err } *i = append(*i, duration) } return nil } // Define a flag to accumulate durations. Because it has a special type, // we need to use the Var function and therefore create the flag during // init. var intervalFlag interval func init() { // Tie the command-line flag to the intervalFlag variable and // set a usage message. flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events") } func Example() { // All the interesting pieces are with the variables declared above, but // to enable the flag package to see the flags defined there, one must // execute, typically at the start of main (not init!): // flag.Parse() // We don't call it here because this code is a function called "Example" // that is part of the testing suite for the package, which has already // parsed the flags. When viewed at pkg.go.dev, however, the function is // renamed to "main" and it could be run as a standalone example. }
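// Usage sketch (illustrative only, not part of the original example file):
// what the flags defined above look like at run time. The binary name, flag
// values, and function name are assumptions for demonstration; the file's
// existing "flag" and "fmt" imports suffice.
//
//	func demo() {
//		// Typically called at the start of main:
//		flag.Parse()
//		// With a hypothetical invocation such as
//		//   ./gopherstudy -species=koala -deltaT=10s,15s
//		// the package-level variables above now hold:
//		fmt.Println(*species)     // koala
//		fmt.Println(gopherType)   // pocket (the default; -g/-gopher_type not set)
//		fmt.Println(intervalFlag) // [10s 15s]
//	}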
go/src/flag/example_test.go/0
{ "file_path": "go/src/flag/example_test.go", "repo_id": "go", "token_count": 845 }
242
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fmt_test import ( "bufio" "bytes" "errors" . "fmt" "io" "math" "reflect" "regexp" "strings" "testing" "testing/iotest" "unicode/utf8" ) type ScanTest struct { text string in any out any } type ScanfTest struct { format string text string in any out any } type ScanfMultiTest struct { format string text string in []any out []any err string } var ( boolVal bool intVal int int8Val int8 int16Val int16 int32Val int32 int64Val int64 uintVal uint uint8Val uint8 uint16Val uint16 uint32Val uint32 uint64Val uint64 uintptrVal uintptr float32Val float32 float64Val float64 stringVal string bytesVal []byte runeVal rune complex64Val complex64 complex128Val complex128 renamedBoolVal renamedBool renamedIntVal renamedInt renamedInt8Val renamedInt8 renamedInt16Val renamedInt16 renamedInt32Val renamedInt32 renamedInt64Val renamedInt64 renamedUintVal renamedUint renamedUint8Val renamedUint8 renamedUint16Val renamedUint16 renamedUint32Val renamedUint32 renamedUint64Val renamedUint64 renamedUintptrVal renamedUintptr renamedStringVal renamedString renamedBytesVal renamedBytes renamedFloat32Val renamedFloat32 renamedFloat64Val renamedFloat64 renamedComplex64Val renamedComplex64 renamedComplex128Val renamedComplex128 ) // Xs accepts any non-empty run of the verb character type Xs string func (x *Xs) Scan(state ScanState, verb rune) error { tok, err := state.Token(true, func(r rune) bool { return r == verb }) if err != nil { return err } s := string(tok) if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(s) { return errors.New("syntax error for xs") } *x = Xs(s) return nil } var xVal Xs // IntString accepts an integer followed immediately by a string. // It tests the embedding of a scan within a scan. 
type IntString struct { i int s string } func (s *IntString) Scan(state ScanState, verb rune) error { if _, err := Fscan(state, &s.i); err != nil { return err } tok, err := state.Token(true, nil) if err != nil { return err } s.s = string(tok) return nil } var intStringVal IntString var scanTests = []ScanTest{ // Basic types {"T\n", &boolVal, true}, // boolean test vals toggle to be sure they are written {"F\n", &boolVal, false}, // restored to zero value {"21\n", &intVal, 21}, {"2_1\n", &intVal, 21}, {"0\n", &intVal, 0}, {"000\n", &intVal, 0}, {"0x10\n", &intVal, 0x10}, {"0x_1_0\n", &intVal, 0x10}, {"-0x10\n", &intVal, -0x10}, {"0377\n", &intVal, 0377}, {"0_3_7_7\n", &intVal, 0377}, {"0o377\n", &intVal, 0377}, {"0o_3_7_7\n", &intVal, 0377}, {"-0377\n", &intVal, -0377}, {"-0o377\n", &intVal, -0377}, {"0\n", &uintVal, uint(0)}, {"000\n", &uintVal, uint(0)}, {"0x10\n", &uintVal, uint(0x10)}, {"0377\n", &uintVal, uint(0377)}, {"22\n", &int8Val, int8(22)}, {"23\n", &int16Val, int16(23)}, {"24\n", &int32Val, int32(24)}, {"25\n", &int64Val, int64(25)}, {"127\n", &int8Val, int8(127)}, {"-21\n", &intVal, -21}, {"-22\n", &int8Val, int8(-22)}, {"-23\n", &int16Val, int16(-23)}, {"-24\n", &int32Val, int32(-24)}, {"-25\n", &int64Val, int64(-25)}, {"-128\n", &int8Val, int8(-128)}, {"+21\n", &intVal, +21}, {"+22\n", &int8Val, int8(+22)}, {"+23\n", &int16Val, int16(+23)}, {"+24\n", &int32Val, int32(+24)}, {"+25\n", &int64Val, int64(+25)}, {"+127\n", &int8Val, int8(+127)}, {"26\n", &uintVal, uint(26)}, {"27\n", &uint8Val, uint8(27)}, {"28\n", &uint16Val, uint16(28)}, {"29\n", &uint32Val, uint32(29)}, {"30\n", &uint64Val, uint64(30)}, {"31\n", &uintptrVal, uintptr(31)}, {"255\n", &uint8Val, uint8(255)}, {"32767\n", &int16Val, int16(32767)}, {"2.3\n", &float64Val, 2.3}, {"2.3e1\n", &float32Val, float32(2.3e1)}, {"2.3e2\n", &float64Val, 2.3e2}, {"2.3p2\n", &float64Val, 2.3 * 4}, {"2.3p+2\n", &float64Val, 2.3 * 4}, {"2.3p+66\n", &float64Val, 2.3 * (1 << 66)}, {"2.3p-66\n", &float64Val, 2.3 / (1 << 66)}, {"0x2.3p-66\n", &float64Val, float64(0x23) / (1 << 70)}, {"2_3.4_5\n", &float64Val, 23.45}, {"2.35\n", &stringVal, "2.35"}, {"2345678\n", &bytesVal, []byte("2345678")}, {"(3.4e1-2i)\n", &complex128Val, 3.4e1 - 2i}, {"-3.45e1-3i\n", &complex64Val, complex64(-3.45e1 - 3i)}, {"-.45e1-1e2i\n", &complex128Val, complex128(-.45e1 - 100i)}, {"-.4_5e1-1E2i\n", &complex128Val, complex128(-.45e1 - 100i)}, {"0x1.0p1+0x1.0P2i\n", &complex128Val, complex128(2 + 4i)}, {"-0x1p1-0x1p2i\n", &complex128Val, complex128(-2 - 4i)}, {"-0x1ep-1-0x1p2i\n", &complex128Val, complex128(-15 - 4i)}, {"-0x1_Ep-1-0x1p0_2i\n", &complex128Val, complex128(-15 - 4i)}, {"hello\n", &stringVal, "hello"}, // Carriage-return followed by newline. (We treat \r\n as \n always.) 
{"hello\r\n", &stringVal, "hello"}, {"27\r\n", &uint8Val, uint8(27)}, // Renamed types {"true\n", &renamedBoolVal, renamedBool(true)}, {"F\n", &renamedBoolVal, renamedBool(false)}, {"101\n", &renamedIntVal, renamedInt(101)}, {"102\n", &renamedIntVal, renamedInt(102)}, {"103\n", &renamedUintVal, renamedUint(103)}, {"104\n", &renamedUintVal, renamedUint(104)}, {"105\n", &renamedInt8Val, renamedInt8(105)}, {"106\n", &renamedInt16Val, renamedInt16(106)}, {"107\n", &renamedInt32Val, renamedInt32(107)}, {"108\n", &renamedInt64Val, renamedInt64(108)}, {"109\n", &renamedUint8Val, renamedUint8(109)}, {"110\n", &renamedUint16Val, renamedUint16(110)}, {"111\n", &renamedUint32Val, renamedUint32(111)}, {"112\n", &renamedUint64Val, renamedUint64(112)}, {"113\n", &renamedUintptrVal, renamedUintptr(113)}, {"114\n", &renamedStringVal, renamedString("114")}, {"115\n", &renamedBytesVal, renamedBytes([]byte("115"))}, // Custom scanners. {" vvv ", &xVal, Xs("vvv")}, {" 1234hello", &intStringVal, IntString{1234, "hello"}}, // Fixed bugs {"2147483648\n", &int64Val, int64(2147483648)}, // was: integer overflow } var scanfTests = []ScanfTest{ {"%v", "TRUE\n", &boolVal, true}, {"%t", "false\n", &boolVal, false}, {"%v", "-71\n", &intVal, -71}, {"%v", "-7_1\n", &intVal, -71}, {"%v", "0b111\n", &intVal, 7}, {"%v", "0b_1_1_1\n", &intVal, 7}, {"%v", "0377\n", &intVal, 0377}, {"%v", "0_3_7_7\n", &intVal, 0377}, {"%v", "0o377\n", &intVal, 0377}, {"%v", "0o_3_7_7\n", &intVal, 0377}, {"%v", "0x44\n", &intVal, 0x44}, {"%v", "0x_4_4\n", &intVal, 0x44}, {"%d", "72\n", &intVal, 72}, {"%c", "a\n", &runeVal, 'a'}, {"%c", "\u5072\n", &runeVal, '\u5072'}, {"%c", "\u1234\n", &runeVal, '\u1234'}, {"%d", "73\n", &int8Val, int8(73)}, {"%d", "+74\n", &int16Val, int16(74)}, {"%d", "75\n", &int32Val, int32(75)}, {"%d", "76\n", &int64Val, int64(76)}, {"%b", "1001001\n", &intVal, 73}, {"%o", "075\n", &intVal, 075}, {"%x", "a75\n", &intVal, 0xa75}, {"%v", "71\n", &uintVal, uint(71)}, {"%d", "72\n", &uintVal, uint(72)}, {"%d", "7_2\n", &uintVal, uint(7)}, // only %v takes underscores {"%d", "73\n", &uint8Val, uint8(73)}, {"%d", "74\n", &uint16Val, uint16(74)}, {"%d", "75\n", &uint32Val, uint32(75)}, {"%d", "76\n", &uint64Val, uint64(76)}, {"%d", "77\n", &uintptrVal, uintptr(77)}, {"%b", "1001001\n", &uintVal, uint(73)}, {"%b", "100_1001\n", &uintVal, uint(4)}, {"%o", "075\n", &uintVal, uint(075)}, {"%o", "07_5\n", &uintVal, uint(07)}, // only %v takes underscores {"%x", "a75\n", &uintVal, uint(0xa75)}, {"%x", "A75\n", &uintVal, uint(0xa75)}, {"%x", "A7_5\n", &uintVal, uint(0xa7)}, // only %v takes underscores {"%U", "U+1234\n", &intVal, int(0x1234)}, {"%U", "U+4567\n", &uintVal, uint(0x4567)}, {"%e", "2.3\n", &float64Val, 2.3}, {"%E", "2.3e1\n", &float32Val, float32(2.3e1)}, {"%f", "2.3e2\n", &float64Val, 2.3e2}, {"%g", "2.3p2\n", &float64Val, 2.3 * 4}, {"%G", "2.3p+2\n", &float64Val, 2.3 * 4}, {"%v", "2.3p+66\n", &float64Val, 2.3 * (1 << 66)}, {"%f", "2.3p-66\n", &float64Val, 2.3 / (1 << 66)}, {"%G", "0x2.3p-66\n", &float64Val, float64(0x23) / (1 << 70)}, {"%E", "2_3.4_5\n", &float64Val, 23.45}, // Strings {"%s", "using-%s\n", &stringVal, "using-%s"}, {"%x", "7573696e672d2578\n", &stringVal, "using-%x"}, {"%X", "7573696E672D2558\n", &stringVal, "using-%X"}, {"%q", `"quoted\twith\\do\u0075bl\x65s"` + "\n", &stringVal, "quoted\twith\\doubles"}, {"%q", "`quoted with backs`\n", &stringVal, "quoted with backs"}, // Byte slices {"%s", "bytes-%s\n", &bytesVal, []byte("bytes-%s")}, {"%x", "62797465732d2578\n", &bytesVal, []byte("bytes-%x")}, {"%X", 
"62797465732D2558\n", &bytesVal, []byte("bytes-%X")}, {"%q", `"bytes\rwith\vdo\u0075bl\x65s"` + "\n", &bytesVal, []byte("bytes\rwith\vdoubles")}, {"%q", "`bytes with backs`\n", &bytesVal, []byte("bytes with backs")}, // Renamed types {"%v\n", "true\n", &renamedBoolVal, renamedBool(true)}, {"%t\n", "F\n", &renamedBoolVal, renamedBool(false)}, {"%v", "101\n", &renamedIntVal, renamedInt(101)}, {"%c", "\u0101\n", &renamedIntVal, renamedInt('\u0101')}, {"%o", "0146\n", &renamedIntVal, renamedInt(102)}, {"%v", "103\n", &renamedUintVal, renamedUint(103)}, {"%d", "104\n", &renamedUintVal, renamedUint(104)}, {"%d", "105\n", &renamedInt8Val, renamedInt8(105)}, {"%d", "106\n", &renamedInt16Val, renamedInt16(106)}, {"%d", "107\n", &renamedInt32Val, renamedInt32(107)}, {"%d", "108\n", &renamedInt64Val, renamedInt64(108)}, {"%x", "6D\n", &renamedUint8Val, renamedUint8(109)}, {"%o", "0156\n", &renamedUint16Val, renamedUint16(110)}, {"%d", "111\n", &renamedUint32Val, renamedUint32(111)}, {"%d", "112\n", &renamedUint64Val, renamedUint64(112)}, {"%d", "113\n", &renamedUintptrVal, renamedUintptr(113)}, {"%s", "114\n", &renamedStringVal, renamedString("114")}, {"%q", "\"1155\"\n", &renamedBytesVal, renamedBytes([]byte("1155"))}, {"%g", "116e1\n", &renamedFloat32Val, renamedFloat32(116e1)}, {"%g", "-11.7e+1", &renamedFloat64Val, renamedFloat64(-11.7e+1)}, {"%g", "11+6e1i\n", &renamedComplex64Val, renamedComplex64(11 + 6e1i)}, {"%g", "-11.+7e+1i", &renamedComplex128Val, renamedComplex128(-11. + 7e+1i)}, // Interesting formats {"here is\tthe value:%d", "here is the\tvalue:118\n", &intVal, 118}, {"%% %%:%d", "% %:119\n", &intVal, 119}, {"%d%%", "42%", &intVal, 42}, // %% at end of string. // Corner cases {"%x", "FFFFFFFF\n", &uint32Val, uint32(0xFFFFFFFF)}, // Custom scanner. {"%s", " sss ", &xVal, Xs("sss")}, {"%2s", "sssss", &xVal, Xs("ss")}, // Fixed bugs {"%d\n", "27\n", &intVal, 27}, // ok {"%d\n", "28 \n", &intVal, 28}, // was: "unexpected newline" {"%v", "0", &intVal, 0}, // was: "EOF"; 0 was taken as base prefix and not counted. {"%v", "0", &uintVal, uint(0)}, // was: "EOF"; 0 was taken as base prefix and not counted. {"%c", " ", &uintVal, uint(' ')}, // %c must accept a blank. {"%c", "\t", &uintVal, uint('\t')}, // %c must accept any space. {"%c", "\n", &uintVal, uint('\n')}, // %c must accept any space. {"%d%%", "23%\n", &uintVal, uint(23)}, // %% matches literal %. {"%%%d", "%23\n", &uintVal, uint(23)}, // %% matches literal %. 
// space handling {"%d", "27", &intVal, 27}, {"%d", "27 ", &intVal, 27}, {"%d", " 27", &intVal, 27}, {"%d", " 27 ", &intVal, 27}, {"X%d", "X27", &intVal, 27}, {"X%d", "X27 ", &intVal, 27}, {"X%d", "X 27", &intVal, 27}, {"X%d", "X 27 ", &intVal, 27}, {"X %d", "X27", &intVal, nil}, // expected space in input to match format {"X %d", "X27 ", &intVal, nil}, // expected space in input to match format {"X %d", "X 27", &intVal, 27}, {"X %d", "X 27 ", &intVal, 27}, {"%dX", "27X", &intVal, 27}, {"%dX", "27 X", &intVal, nil}, // input does not match format {"%dX", " 27X", &intVal, 27}, {"%dX", " 27 X", &intVal, nil}, // input does not match format {"%d X", "27X", &intVal, nil}, // expected space in input to match format {"%d X", "27 X", &intVal, 27}, {"%d X", " 27X", &intVal, nil}, // expected space in input to match format {"%d X", " 27 X", &intVal, 27}, {"X %d X", "X27X", &intVal, nil}, // expected space in input to match format {"X %d X", "X27 X", &intVal, nil}, // expected space in input to match format {"X %d X", "X 27X", &intVal, nil}, // expected space in input to match format {"X %d X", "X 27 X", &intVal, 27}, {"X %s X", "X27X", &stringVal, nil}, // expected space in input to match format {"X %s X", "X27 X", &stringVal, nil}, // expected space in input to match format {"X %s X", "X 27X", &stringVal, nil}, // unexpected EOF {"X %s X", "X 27 X", &stringVal, "27"}, {"X%sX", "X27X", &stringVal, nil}, // unexpected EOF {"X%sX", "X27 X", &stringVal, nil}, // input does not match format {"X%sX", "X 27X", &stringVal, nil}, // unexpected EOF {"X%sX", "X 27 X", &stringVal, nil}, // input does not match format {"X%s", "X27", &stringVal, "27"}, {"X%s", "X27 ", &stringVal, "27"}, {"X%s", "X 27", &stringVal, "27"}, {"X%s", "X 27 ", &stringVal, "27"}, {"X%dX", "X27X", &intVal, 27}, {"X%dX", "X27 X", &intVal, nil}, // input does not match format {"X%dX", "X 27X", &intVal, 27}, {"X%dX", "X 27 X", &intVal, nil}, // input does not match format {"X%dX", "X27X", &intVal, 27}, {"X%dX", "X27X ", &intVal, 27}, {"X%dX", " X27X", &intVal, nil}, // input does not match format {"X%dX", " X27X ", &intVal, nil}, // input does not match format {"X%dX\n", "X27X", &intVal, 27}, {"X%dX \n", "X27X ", &intVal, 27}, {"X%dX\n", "X27X\n", &intVal, 27}, {"X%dX\n", "X27X \n", &intVal, 27}, {"X%dX \n", "X27X", &intVal, 27}, {"X%dX \n", "X27X ", &intVal, 27}, {"X%dX \n", "X27X\n", &intVal, 27}, {"X%dX \n", "X27X \n", &intVal, 27}, {"X%c", "X\n", &runeVal, '\n'}, {"X%c", "X \n", &runeVal, ' '}, {"X %c", "X!", &runeVal, nil}, // expected space in input to match format {"X %c", "X\n", &runeVal, nil}, // newline in input does not match format {"X %c", "X !", &runeVal, '!'}, {"X %c", "X \n", &runeVal, '\n'}, {" X%dX", "X27X", &intVal, nil}, // expected space in input to match format {" X%dX", "X27X ", &intVal, nil}, // expected space in input to match format {" X%dX", " X27X", &intVal, 27}, {" X%dX", " X27X ", &intVal, 27}, {"X%dX ", "X27X", &intVal, 27}, {"X%dX ", "X27X ", &intVal, 27}, {"X%dX ", " X27X", &intVal, nil}, // input does not match format {"X%dX ", " X27X ", &intVal, nil}, // input does not match format {" X%dX ", "X27X", &intVal, nil}, // expected space in input to match format {" X%dX ", "X27X ", &intVal, nil}, // expected space in input to match format {" X%dX ", " X27X", &intVal, 27}, {" X%dX ", " X27X ", &intVal, 27}, {"%d\nX", "27\nX", &intVal, 27}, {"%dX\n X", "27X\n X", &intVal, 27}, } var overflowTests = []ScanTest{ {"128", &int8Val, 0}, {"32768", &int16Val, 0}, {"-129", &int8Val, 0}, {"-32769", &int16Val, 0}, 
{"256", &uint8Val, 0}, {"65536", &uint16Val, 0}, {"1e100", &float32Val, 0}, {"1e500", &float64Val, 0}, {"(1e100+0i)", &complex64Val, 0}, {"(1+1e100i)", &complex64Val, 0}, {"(1-1e500i)", &complex128Val, 0}, } var truth bool var i, j, k int var f float64 var s, t string var c complex128 var x, y Xs var z IntString var r1, r2, r3 rune var multiTests = []ScanfMultiTest{ {"", "", []any{}, []any{}, ""}, {"%d", "23", args(&i), args(23), ""}, {"%2s%3s", "22333", args(&s, &t), args("22", "333"), ""}, {"%2d%3d", "44555", args(&i, &j), args(44, 555), ""}, {"%2d.%3d", "66.777", args(&i, &j), args(66, 777), ""}, {"%d, %d", "23, 18", args(&i, &j), args(23, 18), ""}, {"%3d22%3d", "33322333", args(&i, &j), args(333, 333), ""}, {"%6vX=%3fY", "3+2iX=2.5Y", args(&c, &f), args((3 + 2i), 2.5), ""}, {"%d%s", "123abc", args(&i, &s), args(123, "abc"), ""}, {"%c%c%c", "2\u50c2X", args(&r1, &r2, &r3), args('2', '\u50c2', 'X'), ""}, {"%5s%d", " 1234567 ", args(&s, &i), args("12345", 67), ""}, {"%5s%d", " 12 34 567 ", args(&s, &i), args("12", 34), ""}, // Custom scanners. {"%e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""}, {"%4v%s", "12abcd", args(&z, &s), args(IntString{12, "ab"}, "cd"), ""}, // Errors {"%t", "23 18", args(&i), nil, "bad verb"}, {"%d %d %d", "23 18", args(&i, &j), args(23, 18), "too few operands"}, {"%d %d", "23 18 27", args(&i, &j, &k), args(23, 18), "too many operands"}, {"%c", "\u0100", args(&int8Val), nil, "overflow"}, {"X%d", "10X", args(&intVal), nil, "input does not match format"}, {"%d%", "42%", args(&intVal), args(42), "missing verb: % at end of format string"}, {"%d% ", "42%", args(&intVal), args(42), "too few operands for format '% '"}, // Slightly odd error, but correct. {"%%%d", "xxx 42", args(&intVal), args(42), "missing literal %"}, {"%%%d", "x42", args(&intVal), args(42), "missing literal %"}, {"%%%d", "42", args(&intVal), args(42), "missing literal %"}, // Bad UTF-8: should see every byte. 
{"%c%c%c", "\xc2X\xc2", args(&r1, &r2, &r3), args(utf8.RuneError, 'X', utf8.RuneError), ""}, // Fixed bugs {"%v%v", "FALSE23", args(&truth, &i), args(false, 23), ""}, } var readers = []struct { name string f func(string) io.Reader }{ {"StringReader", func(s string) io.Reader { return strings.NewReader(s) }}, {"ReaderOnly", func(s string) io.Reader { return struct{ io.Reader }{strings.NewReader(s)} }}, {"OneByteReader", func(s string) io.Reader { return iotest.OneByteReader(strings.NewReader(s)) }}, {"DataErrReader", func(s string) io.Reader { return iotest.DataErrReader(strings.NewReader(s)) }}, } func testScan(t *testing.T, f func(string) io.Reader, scan func(r io.Reader, a ...any) (int, error)) { for _, test := range scanTests { r := f(test.text) n, err := scan(r, test.in) if err != nil { m := "" if n > 0 { m = Sprintf(" (%d fields ok)", n) } t.Errorf("got error scanning %q: %s%s", test.text, err, m) continue } if n != 1 { t.Errorf("count error on entry %q: got %d", test.text, n) continue } // The incoming value may be a pointer v := reflect.ValueOf(test.in) if p := v; p.Kind() == reflect.Pointer { v = p.Elem() } val := v.Interface() if !reflect.DeepEqual(val, test.out) { t.Errorf("scanning %q: expected %#v got %#v, type %T", test.text, test.out, val, val) } } } func TestScan(t *testing.T) { for _, r := range readers { t.Run(r.name, func(t *testing.T) { testScan(t, r.f, Fscan) }) } } func TestScanln(t *testing.T) { for _, r := range readers { t.Run(r.name, func(t *testing.T) { testScan(t, r.f, Fscanln) }) } } func TestScanf(t *testing.T) { for _, test := range scanfTests { n, err := Sscanf(test.text, test.format, test.in) if err != nil { if test.out != nil { t.Errorf("Sscanf(%q, %q): unexpected error: %v", test.text, test.format, err) } continue } if test.out == nil { t.Errorf("Sscanf(%q, %q): unexpected success", test.text, test.format) continue } if n != 1 { t.Errorf("Sscanf(%q, %q): parsed %d field, want 1", test.text, test.format, n) continue } // The incoming value may be a pointer v := reflect.ValueOf(test.in) if p := v; p.Kind() == reflect.Pointer { v = p.Elem() } val := v.Interface() if !reflect.DeepEqual(val, test.out) { t.Errorf("Sscanf(%q, %q): parsed value %T(%#v), want %T(%#v)", test.text, test.format, val, val, test.out, test.out) } } } func TestScanOverflow(t *testing.T) { // different machines and different types report errors with different strings. 
re := regexp.MustCompile("overflow|too large|out of range|not representable") for _, test := range overflowTests { _, err := Sscan(test.text, test.in) if err == nil { t.Errorf("expected overflow scanning %q", test.text) continue } if !re.MatchString(err.Error()) { t.Errorf("expected overflow error scanning %q: %s", test.text, err) } } } func verifyNaN(str string, t *testing.T) { var f float64 var f32 float32 var f64 float64 text := str + " " + str + " " + str n, err := Fscan(strings.NewReader(text), &f, &f32, &f64) if err != nil { t.Errorf("got error scanning %q: %s", text, err) } if n != 3 { t.Errorf("count error scanning %q: got %d", text, n) } if !math.IsNaN(float64(f)) || !math.IsNaN(float64(f32)) || !math.IsNaN(f64) { t.Errorf("didn't get NaNs scanning %q: got %g %g %g", text, f, f32, f64) } } func TestNaN(t *testing.T) { for _, s := range []string{"nan", "NAN", "NaN"} { verifyNaN(s, t) } } func verifyInf(str string, t *testing.T) { var f float64 var f32 float32 var f64 float64 text := str + " " + str + " " + str n, err := Fscan(strings.NewReader(text), &f, &f32, &f64) if err != nil { t.Errorf("got error scanning %q: %s", text, err) } if n != 3 { t.Errorf("count error scanning %q: got %d", text, n) } sign := 1 if str[0] == '-' { sign = -1 } if !math.IsInf(float64(f), sign) || !math.IsInf(float64(f32), sign) || !math.IsInf(f64, sign) { t.Errorf("didn't get right Infs scanning %q: got %g %g %g", text, f, f32, f64) } } func TestInf(t *testing.T) { for _, s := range []string{"inf", "+inf", "-inf", "INF", "-INF", "+INF", "Inf", "-Inf", "+Inf"} { verifyInf(s, t) } } func testScanfMulti(t *testing.T, f func(string) io.Reader) { sliceType := reflect.TypeOf(make([]any, 1)) for _, test := range multiTests { r := f(test.text) n, err := Fscanf(r, test.format, test.in...) if err != nil { if test.err == "" { t.Errorf("got error scanning (%q, %q): %q", test.format, test.text, err) } else if !strings.Contains(err.Error(), test.err) { t.Errorf("got wrong error scanning (%q, %q): %q; expected %q", test.format, test.text, err, test.err) } continue } if test.err != "" { t.Errorf("expected error %q error scanning (%q, %q)", test.err, test.format, test.text) } if n != len(test.out) { t.Errorf("count error on entry (%q, %q): expected %d got %d", test.format, test.text, len(test.out), n) continue } // Convert the slice of pointers into a slice of values resultVal := reflect.MakeSlice(sliceType, n, n) for i := 0; i < n; i++ { v := reflect.ValueOf(test.in[i]).Elem() resultVal.Index(i).Set(v) } result := resultVal.Interface() if !reflect.DeepEqual(result, test.out) { t.Errorf("scanning (%q, %q): expected %#v got %#v", test.format, test.text, test.out, result) } } } func TestScanfMulti(t *testing.T) { for _, r := range readers { t.Run(r.name, func(t *testing.T) { testScanfMulti(t, r.f) }) } } func TestScanMultiple(t *testing.T) { var a int var s string n, err := Sscan("123abc", &a, &s) if n != 2 { t.Errorf("Sscan count error: expected 2: got %d", n) } if err != nil { t.Errorf("Sscan expected no error; got %s", err) } if a != 123 || s != "abc" { t.Errorf("Sscan wrong values: got (%d %q) expected (123 \"abc\")", a, s) } n, err = Sscan("asdf", &s, &a) if n != 1 { t.Errorf("Sscan count error: expected 1: got %d", n) } if err == nil { t.Errorf("Sscan expected error; got none: %s", err) } if s != "asdf" { t.Errorf("Sscan wrong values: got %q expected \"asdf\"", s) } } // Empty strings are not valid input when scanning a string. 
func TestScanEmpty(t *testing.T) { var s1, s2 string n, err := Sscan("abc", &s1, &s2) if n != 1 { t.Errorf("Sscan count error: expected 1: got %d", n) } if err == nil { t.Error("Sscan <one item> expected error; got none") } if s1 != "abc" { t.Errorf("Sscan wrong values: got %q expected \"abc\"", s1) } n, err = Sscan("", &s1, &s2) if n != 0 { t.Errorf("Sscan count error: expected 0: got %d", n) } if err == nil { t.Error("Sscan <empty> expected error; got none") } // Quoted empty string is OK. n, err = Sscanf(`""`, "%q", &s1) if n != 1 { t.Errorf("Sscanf count error: expected 1: got %d", n) } if err != nil { t.Errorf("Sscanf <empty> expected no error with quoted string; got %s", err) } } func TestScanNotPointer(t *testing.T) { r := strings.NewReader("1") var a int _, err := Fscan(r, a) if err == nil { t.Error("expected error scanning non-pointer") } else if !strings.Contains(err.Error(), "pointer") { t.Errorf("expected pointer error scanning non-pointer, got: %s", err) } } func TestScanlnNoNewline(t *testing.T) { var a int _, err := Sscanln("1 x\n", &a) if err == nil { t.Error("expected error scanning string missing newline") } else if !strings.Contains(err.Error(), "newline") { t.Errorf("expected newline error scanning string missing newline, got: %s", err) } } func TestScanlnWithMiddleNewline(t *testing.T) { r := strings.NewReader("123\n456\n") var a, b int _, err := Fscanln(r, &a, &b) if err == nil { t.Error("expected error scanning string with extra newline") } else if !strings.Contains(err.Error(), "newline") { t.Errorf("expected newline error scanning string with extra newline, got: %s", err) } } // eofCounter is a special Reader that counts reads at end of file. type eofCounter struct { reader *strings.Reader eofCount int } func (ec *eofCounter) Read(b []byte) (n int, err error) { n, err = ec.reader.Read(b) if n == 0 { ec.eofCount++ } return } // TestEOF verifies that when we scan, we see at most EOF once per call to a // Scan function, and then only when it's really an EOF. func TestEOF(t *testing.T) { ec := &eofCounter{strings.NewReader("123\n"), 0} var a int n, err := Fscanln(ec, &a) if err != nil { t.Error("unexpected error", err) } if n != 1 { t.Error("expected to scan one item, got", n) } if ec.eofCount != 0 { t.Error("expected zero EOFs", ec.eofCount) ec.eofCount = 0 // reset for next test } n, err = Fscanln(ec, &a) if err == nil { t.Error("expected error scanning empty string") } if n != 0 { t.Error("expected to scan zero items, got", n) } if ec.eofCount != 1 { t.Error("expected one EOF, got", ec.eofCount) } } // TestEOFAtEndOfInput verifies that we see an EOF error if we run out of input. // This was a buglet: we used to get "expected integer". func TestEOFAtEndOfInput(t *testing.T) { var i, j int n, err := Sscanf("23", "%d %d", &i, &j) if n != 1 || i != 23 { t.Errorf("Sscanf expected one value of 23; got %d %d", n, i) } if err != io.EOF { t.Errorf("Sscanf expected EOF; got %q", err) } n, err = Sscan("234", &i, &j) if n != 1 || i != 234 { t.Errorf("Sscan expected one value of 234; got %d %d", n, i) } if err != io.EOF { t.Errorf("Sscan expected EOF; got %q", err) } // Trailing space is tougher. 
n, err = Sscan("234 ", &i, &j) if n != 1 || i != 234 { t.Errorf("Sscan expected one value of 234; got %d %d", n, i) } if err != io.EOF { t.Errorf("Sscan expected EOF; got %q", err) } } var eofTests = []struct { format string v any }{ {"%s", &stringVal}, {"%q", &stringVal}, {"%x", &stringVal}, {"%v", &stringVal}, {"%v", &bytesVal}, {"%v", &intVal}, {"%v", &uintVal}, {"%v", &boolVal}, {"%v", &float32Val}, {"%v", &complex64Val}, {"%v", &renamedStringVal}, {"%v", &renamedBytesVal}, {"%v", &renamedIntVal}, {"%v", &renamedUintVal}, {"%v", &renamedBoolVal}, {"%v", &renamedFloat32Val}, {"%v", &renamedComplex64Val}, } func TestEOFAllTypes(t *testing.T) { for i, test := range eofTests { if _, err := Sscanf("", test.format, test.v); err != io.EOF { t.Errorf("#%d: %s %T not eof on empty string: %s", i, test.format, test.v, err) } if _, err := Sscanf(" ", test.format, test.v); err != io.EOF { t.Errorf("#%d: %s %T not eof on trailing blanks: %s", i, test.format, test.v, err) } } } // TestUnreadRuneWithBufio verifies that, at least when using bufio, successive // calls to Fscan do not lose runes. func TestUnreadRuneWithBufio(t *testing.T) { r := bufio.NewReader(strings.NewReader("123αb")) var i int var a string n, err := Fscanf(r, "%d", &i) if n != 1 || err != nil { t.Errorf("reading int expected one item, no errors; got %d %q", n, err) } if i != 123 { t.Errorf("expected 123; got %d", i) } n, err = Fscanf(r, "%s", &a) if n != 1 || err != nil { t.Errorf("reading string expected one item, no errors; got %d %q", n, err) } if a != "αb" { t.Errorf("expected αb; got %q", a) } } type TwoLines string // Scan attempts to read two lines into the object. Scanln should prevent this // because it stops at newline; Scan and Scanf should be fine. func (t *TwoLines) Scan(state ScanState, verb rune) error { chars := make([]rune, 0, 100) for nlCount := 0; nlCount < 2; { c, _, err := state.ReadRune() if err != nil { return err } chars = append(chars, c) if c == '\n' { nlCount++ } } *t = TwoLines(string(chars)) return nil } func TestMultiLine(t *testing.T) { input := "abc\ndef\n" // Sscan should work var tscan TwoLines n, err := Sscan(input, &tscan) if n != 1 { t.Errorf("Sscan: expected 1 item; got %d", n) } if err != nil { t.Errorf("Sscan: expected no error; got %s", err) } if string(tscan) != input { t.Errorf("Sscan: expected %q; got %q", input, tscan) } // Sscanf should work var tscanf TwoLines n, err = Sscanf(input, "%s", &tscanf) if n != 1 { t.Errorf("Sscanf: expected 1 item; got %d", n) } if err != nil { t.Errorf("Sscanf: expected no error; got %s", err) } if string(tscanf) != input { t.Errorf("Sscanf: expected %q; got %q", input, tscanf) } // Sscanln should not work var tscanln TwoLines n, err = Sscanln(input, &tscanln) if n != 0 { t.Errorf("Sscanln: expected 0 items; got %d: %q", n, tscanln) } if err == nil { t.Error("Sscanln: expected error; got none") } else if err != io.ErrUnexpectedEOF { t.Errorf("Sscanln: expected io.ErrUnexpectedEOF (ha!); got %s", err) } } // TestLineByLineFscanf tests that Fscanf does not read past newline. Issue // 3481. func TestLineByLineFscanf(t *testing.T) { r := struct{ io.Reader }{strings.NewReader("1\n2\n")} var i, j int n, err := Fscanf(r, "%v\n", &i) if n != 1 || err != nil { t.Fatalf("first read: %d %q", n, err) } n, err = Fscanf(r, "%v\n", &j) if n != 1 || err != nil { t.Fatalf("second read: %d %q", n, err) } if i != 1 || j != 2 { t.Errorf("wrong values; wanted 1 2 got %d %d", i, j) } } // TestScanStateCount verifies the correct byte count is returned. Issue 8512. 
// runeScanner implements the Scanner interface for TestScanStateCount. type runeScanner struct { rune rune size int } func (rs *runeScanner) Scan(state ScanState, verb rune) error { r, size, err := state.ReadRune() rs.rune = r rs.size = size return err } func TestScanStateCount(t *testing.T) { var a, b, c runeScanner n, err := Sscanf("12➂", "%c%c%c", &a, &b, &c) if err != nil { t.Fatal(err) } if n != 3 { t.Fatalf("expected 3 items consumed, got %d", n) } if a.rune != '1' || b.rune != '2' || c.rune != '➂' { t.Errorf("bad scan rune: %q %q %q should be '1' '2' '➂'", a.rune, b.rune, c.rune) } if a.size != 1 || b.size != 1 || c.size != 3 { t.Errorf("bad scan size: %q %q %q should be 1 1 3", a.size, b.size, c.size) } } // RecursiveInt accepts a string matching %d.%d.%d.... // and parses it into a linked list. // It allows us to benchmark recursive descent style scanners. type RecursiveInt struct { i int next *RecursiveInt } func (r *RecursiveInt) Scan(state ScanState, verb rune) (err error) { _, err = Fscan(state, &r.i) if err != nil { return } next := new(RecursiveInt) _, err = Fscanf(state, ".%v", next) if err != nil { if err == io.ErrUnexpectedEOF { err = nil } return } r.next = next return } // scanInts performs the same scanning task as RecursiveInt.Scan // but without recurring through scanner, so we can compare // performance more directly. func scanInts(r *RecursiveInt, b *bytes.Buffer) (err error) { r.next = nil _, err = Fscan(b, &r.i) if err != nil { return } c, _, err := b.ReadRune() if err != nil { if err == io.EOF { err = nil } return } if c != '.' { return } next := new(RecursiveInt) err = scanInts(next, b) if err == nil { r.next = next } return } func makeInts(n int) []byte { var buf bytes.Buffer Fprintf(&buf, "1") for i := 1; i < n; i++ { Fprintf(&buf, ".%d", i+1) } return buf.Bytes() } func TestScanInts(t *testing.T) { testScanInts(t, scanInts) testScanInts(t, func(r *RecursiveInt, b *bytes.Buffer) (err error) { _, err = Fscan(b, r) return }) } // 800 is small enough to not overflow the stack when using gccgo on a // platform that does not support split stack. const intCount = 800 func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) error) { r := new(RecursiveInt) ints := makeInts(intCount) buf := bytes.NewBuffer(ints) err := scan(r, buf) if err != nil { t.Error("unexpected error", err) } i := 1 for ; r != nil; r = r.next { if r.i != i { t.Fatalf("bad scan: expected %d got %d", i, r.i) } i++ } if i-1 != intCount { t.Fatalf("bad scan count: expected %d got %d", intCount, i-1) } } func BenchmarkScanInts(b *testing.B) { b.StopTimer() ints := makeInts(intCount) var r RecursiveInt for i := 0; i < b.N; i++ { buf := bytes.NewBuffer(ints) b.StartTimer() scanInts(&r, buf) b.StopTimer() } } func BenchmarkScanRecursiveInt(b *testing.B) { b.StopTimer() ints := makeInts(intCount) var r RecursiveInt for i := 0; i < b.N; i++ { buf := bytes.NewBuffer(ints) b.StartTimer() Fscan(buf, &r) b.StopTimer() } } func BenchmarkScanRecursiveIntReaderWrapper(b *testing.B) { b.StopTimer() ints := makeInts(intCount) var r RecursiveInt for i := 0; i < b.N; i++ { buf := struct{ io.Reader }{strings.NewReader(string(ints))} b.StartTimer() Fscan(buf, &r) b.StopTimer() } } // Issue 9124. // %x on bytes couldn't handle non-space bytes terminating the scan. 
func TestHexBytes(t *testing.T) { var a, b []byte n, err := Sscanf("00010203", "%x", &a) if n != 1 || err != nil { t.Errorf("simple: got count, err = %d, %v; expected 1, nil", n, err) } check := func(msg string, x []byte) { if len(x) != 4 { t.Errorf("%s: bad length %d", msg, len(x)) } for i, b := range x { if int(b) != i { t.Errorf("%s: bad x[%d] = %x", msg, i, x[i]) } } } check("simple", a) a = nil n, err = Sscanf("00010203 00010203", "%x %x", &a, &b) if n != 2 || err != nil { t.Errorf("simple pair: got count, err = %d, %v; expected 2, nil", n, err) } check("simple pair a", a) check("simple pair b", b) a = nil b = nil n, err = Sscanf("00010203:", "%x", &a) if n != 1 || err != nil { t.Errorf("colon: got count, err = %d, %v; expected 1, nil", n, err) } check("colon", a) a = nil n, err = Sscanf("00010203:00010203", "%x:%x", &a, &b) if n != 2 || err != nil { t.Errorf("colon pair: got count, err = %d, %v; expected 2, nil", n, err) } check("colon pair a", a) check("colon pair b", b) a = nil b = nil // This one fails because there is a hex byte after the data, // that is, an odd number of hex input bytes. n, err = Sscanf("000102034:", "%x", &a) if n != 0 || err == nil { t.Errorf("odd count: got count, err = %d, %v; expected 0, error", n, err) } } func TestScanNewlinesAreSpaces(t *testing.T) { var a, b int var tests = []struct { name string text string count int }{ {"newlines", "1\n2\n", 2}, {"no final newline", "1\n2", 2}, {"newlines with spaces ", "1 \n 2 \n", 2}, {"no final newline with spaces", "1 \n 2", 2}, } for _, test := range tests { n, err := Sscan(test.text, &a, &b) if n != test.count { t.Errorf("%s: expected to scan %d item(s), scanned %d", test.name, test.count, n) } if err != nil { t.Errorf("%s: unexpected error: %s", test.name, err) } } } func TestScanlnNewlinesTerminate(t *testing.T) { var a, b int var tests = []struct { name string text string count int ok bool }{ {"one line one item", "1\n", 1, false}, {"one line two items with spaces ", " 1 2 \n", 2, true}, {"one line two items no newline", " 1 2", 2, true}, {"two lines two items", "1\n2\n", 1, false}, } for _, test := range tests { n, err := Sscanln(test.text, &a, &b) if n != test.count { t.Errorf("%s: expected to scan %d item(s), scanned %d", test.name, test.count, n) } if test.ok && err != nil { t.Errorf("%s: unexpected error: %s", test.name, err) } if !test.ok && err == nil { t.Errorf("%s: expected error; got none", test.name) } } } func TestScanfNewlineMatchFormat(t *testing.T) { var a, b int var tests = []struct { name string text string format string count int ok bool }{ {"newline in both", "1\n2", "%d\n%d\n", 2, true}, {"newline in input", "1\n2", "%d %d", 1, false}, {"space-newline in input", "1 \n2", "%d %d", 1, false}, {"newline in format", "1 2", "%d\n%d", 1, false}, {"space-newline in format", "1 2", "%d \n%d", 1, false}, {"space-newline in both", "1 \n2", "%d \n%d", 2, true}, {"extra space in format", "1\n2", "%d\n %d", 2, true}, {"two extra spaces in format", "1\n2", "%d \n %d", 2, true}, {"space vs newline 0000", "1\n2", "%d\n%d", 2, true}, {"space vs newline 0001", "1\n2", "%d\n %d", 2, true}, {"space vs newline 0010", "1\n2", "%d \n%d", 2, true}, {"space vs newline 0011", "1\n2", "%d \n %d", 2, true}, {"space vs newline 0100", "1\n 2", "%d\n%d", 2, true}, {"space vs newline 0101", "1\n 2", "%d\n%d ", 2, true}, {"space vs newline 0110", "1\n 2", "%d \n%d", 2, true}, {"space vs newline 0111", "1\n 2", "%d \n %d", 2, true}, {"space vs newline 1000", "1 \n2", "%d\n%d", 2, true}, {"space vs newline 1001", "1 \n2", 
"%d\n %d", 2, true}, {"space vs newline 1010", "1 \n2", "%d \n%d", 2, true}, {"space vs newline 1011", "1 \n2", "%d \n %d", 2, true}, {"space vs newline 1100", "1 \n 2", "%d\n%d", 2, true}, {"space vs newline 1101", "1 \n 2", "%d\n %d", 2, true}, {"space vs newline 1110", "1 \n 2", "%d \n%d", 2, true}, {"space vs newline 1111", "1 \n 2", "%d \n %d", 2, true}, {"space vs newline no-percent 0000", "1\n2", "1\n2", 0, true}, {"space vs newline no-percent 0001", "1\n2", "1\n 2", 0, true}, {"space vs newline no-percent 0010", "1\n2", "1 \n2", 0, true}, {"space vs newline no-percent 0011", "1\n2", "1 \n 2", 0, true}, {"space vs newline no-percent 0100", "1\n 2", "1\n2", 0, false}, // fails: space after nl in input but not pattern {"space vs newline no-percent 0101", "1\n 2", "1\n2 ", 0, false}, // fails: space after nl in input but not pattern {"space vs newline no-percent 0110", "1\n 2", "1 \n2", 0, false}, // fails: space after nl in input but not pattern {"space vs newline no-percent 0111", "1\n 2", "1 \n 2", 0, true}, {"space vs newline no-percent 1000", "1 \n2", "1\n2", 0, true}, {"space vs newline no-percent 1001", "1 \n2", "1\n 2", 0, true}, {"space vs newline no-percent 1010", "1 \n2", "1 \n2", 0, true}, {"space vs newline no-percent 1011", "1 \n2", "1 \n 2", 0, true}, {"space vs newline no-percent 1100", "1 \n 2", "1\n2", 0, false}, // fails: space after nl in input but not pattern {"space vs newline no-percent 1101", "1 \n 2", "1\n 2", 0, true}, {"space vs newline no-percent 1110", "1 \n 2", "1 \n2", 0, false}, // fails: space after nl in input but not pattern {"space vs newline no-percent 1111", "1 \n 2", "1 \n 2", 0, true}, } for _, test := range tests { var n int var err error if strings.Contains(test.format, "%") { n, err = Sscanf(test.text, test.format, &a, &b) } else { n, err = Sscanf(test.text, test.format) } if n != test.count { t.Errorf("%s: expected to scan %d item(s), scanned %d", test.name, test.count, n) } if test.ok && err != nil { t.Errorf("%s: unexpected error: %s", test.name, err) } if !test.ok && err == nil { t.Errorf("%s: expected error; got none", test.name) } } } // Test for issue 12090: Was unreading at EOF, double-scanning a byte. type hexBytes [2]byte func (h *hexBytes) Scan(ss ScanState, verb rune) error { var b []byte _, err := Fscanf(ss, "%4x", &b) if err != nil { panic(err) // Really shouldn't happen. } copy((*h)[:], b) return err } func TestHexByte(t *testing.T) { var h hexBytes n, err := Sscanln("0123\n", &h) if err != nil { t.Fatal(err) } if n != 1 { t.Fatalf("expected 1 item; scanned %d", n) } if h[0] != 0x01 || h[1] != 0x23 { t.Fatalf("expected 0123 got %x", h) } }
go/src/fmt/scan_test.go/0
{ "file_path": "go/src/fmt/scan_test.go", "repo_id": "go", "token_count": 18163 }
243
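The scan tests in the record above pin down how Sscanf treats newlines versus spaces in the format string and the input. A minimal, self-contained sketch of that exported behavior follows (not part of scan_test.go; the package main wrapper and the printed values are only for illustration):

package main

import "fmt"

func main() {
	var a, b int

	// A newline in the format matches optional spaces followed by a newline
	// in the input, so "%d\n%d" scans both integers from "1\n2".
	n, err := fmt.Sscanf("1\n2", "%d\n%d", &a, &b)
	fmt.Println(n, err, a, b) // 2 <nil> 1 2

	// A plain space in the format does not consume a newline in the input:
	// only the first integer is scanned and an error is reported.
	n, err = fmt.Sscanf("1\n2", "%d %d", &a, &b)
	fmt.Println(n, err != nil) // 1 true
}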
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ast import ( "strings" "testing" ) var tests = []struct { x any // x is printed as s s string }{ // basic types {nil, "0 nil"}, {true, "0 true"}, {42, "0 42"}, {3.14, "0 3.14"}, {1 + 2.718i, "0 (1+2.718i)"}, {"foobar", "0 \"foobar\""}, // maps {map[Expr]string{}, `0 map[ast.Expr]string (len = 0) {}`}, {map[string]int{"a": 1}, `0 map[string]int (len = 1) { 1 . "a": 1 2 }`}, // pointers {new(int), "0 *0"}, // arrays {[0]int{}, `0 [0]int {}`}, {[3]int{1, 2, 3}, `0 [3]int { 1 . 0: 1 2 . 1: 2 3 . 2: 3 4 }`}, {[...]int{42}, `0 [1]int { 1 . 0: 42 2 }`}, // slices {[]int{}, `0 []int (len = 0) {}`}, {[]int{1, 2, 3}, `0 []int (len = 3) { 1 . 0: 1 2 . 1: 2 3 . 2: 3 4 }`}, // structs {struct{}{}, `0 struct {} {}`}, {struct{ x int }{007}, `0 struct { x int } {}`}, {struct{ X, y int }{42, 991}, `0 struct { X int; y int } { 1 . X: 42 2 }`}, {struct{ X, Y int }{42, 991}, `0 struct { X int; Y int } { 1 . X: 42 2 . Y: 991 3 }`}, } // Split s into lines, trim whitespace from all lines, and return // the concatenated non-empty lines. func trim(s string) string { lines := strings.Split(s, "\n") i := 0 for _, line := range lines { line = strings.TrimSpace(line) if line != "" { lines[i] = line i++ } } return strings.Join(lines[0:i], "\n") } func TestPrint(t *testing.T) { var buf strings.Builder for _, test := range tests { buf.Reset() if err := Fprint(&buf, nil, test.x, nil); err != nil { t.Errorf("Fprint failed: %s", err) } if s, ts := trim(buf.String()), trim(test.s); s != ts { t.Errorf("got:\n%s\nexpected:\n%s\n", s, ts) } } }
go/src/go/ast/print_test.go/0
{ "file_path": "go/src/go/ast/print_test.go", "repo_id": "go", "token_count": 905 }
244
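The print test in the record above checks ast.Fprint's indented, line-numbered dump format. A short usage sketch of that API, assuming the caller just wants to inspect a freshly parsed file (the file name and source string are made up for illustration):

package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	"os"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", "package p; var x = 42", 0)
	if err != nil {
		panic(err)
	}
	// Fprint writes the same indented, line-numbered dump that TestPrint
	// verifies; NotNilFilter suppresses struct fields whose values are nil.
	ast.Fprint(os.Stdout, fset, f, ast.NotNilFilter)
}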
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package build import ( "fmt" "go/token" "io" "strings" "testing" ) const quote = "`" type readTest struct { // Test input contains ℙ where readGoInfo should stop. in string err string } var readGoInfoTests = []readTest{ { `package p`, "", }, { `package p; import "x"`, "", }, { `package p; import . "x"`, "", }, { `package p; import "x";ℙvar x = 1`, "", }, { `package p // comment import "x" import _ "x" import a "x" /* comment */ import ( "x" /* comment */ _ "x" a "x" // comment ` + quote + `x` + quote + ` _ /*comment*/ ` + quote + `x` + quote + ` a ` + quote + `x` + quote + ` ) import ( ) import () import()import()import() import();import();import() ℙvar x = 1 `, "", }, { "\ufeff𝔻" + `package p; import "x";ℙvar x = 1`, "", }, } var readCommentsTests = []readTest{ { `ℙpackage p`, "", }, { `ℙpackage p; import "x"`, "", }, { `ℙpackage p; import . "x"`, "", }, { "\ufeff𝔻" + `ℙpackage p; import . "x"`, "", }, { `// foo /* bar */ /* quux */ // baz /*/ zot */ // asdf ℙHello, world`, "", }, { "\ufeff𝔻" + `// foo /* bar */ /* quux */ // baz /*/ zot */ // asdf ℙHello, world`, "", }, } func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) { for i, tt := range tests { beforeP, afterP, _ := strings.Cut(tt.in, "ℙ") in := beforeP + afterP testOut := beforeP if beforeD, afterD, ok := strings.Cut(beforeP, "𝔻"); ok { in = beforeD + afterD + afterP testOut = afterD } r := strings.NewReader(in) buf, err := read(r) if err != nil { if tt.err == "" { t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf)) } else if !strings.Contains(err.Error(), tt.err) { t.Errorf("#%d: err=%q, expected %q", i, err, tt.err) } continue } if tt.err != "" { t.Errorf("#%d: success, expected %q", i, tt.err) continue } out := string(buf) if out != testOut { t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut) } } } func TestReadGoInfo(t *testing.T) { testRead(t, readGoInfoTests, func(r io.Reader) ([]byte, error) { var info fileInfo err := readGoInfo(r, &info) return info.header, err }) } func TestReadComments(t *testing.T) { testRead(t, readCommentsTests, readComments) } var readFailuresTests = []readTest{ { `package`, "syntax error", }, { "package p\n\x00\nimport `math`\n", "unexpected NUL in input", }, { `package p; import`, "syntax error", }, { `package p; import "`, "syntax error", }, { "package p; import ` \n\n", "syntax error", }, { `package p; import "x`, "syntax error", }, { `package p; import _`, "syntax error", }, { `package p; import _ "`, "syntax error", }, { `package p; import _ "x`, "syntax error", }, { `package p; import .`, "syntax error", }, { `package p; import . "`, "syntax error", }, { `package p; import . "x`, "syntax error", }, { `package p; import (`, "syntax error", }, { `package p; import ("`, "syntax error", }, { `package p; import ("x`, "syntax error", }, { `package p; import ("x"`, "syntax error", }, } func TestReadFailuresIgnored(t *testing.T) { // Syntax errors should not be reported (false arg to readImports). // Instead, entire file should be the output and no error. // Convert tests not to return syntax errors. 
tests := make([]readTest, len(readFailuresTests)) copy(tests, readFailuresTests) for i := range tests { tt := &tests[i] if !strings.Contains(tt.err, "NUL") { tt.err = "" } } testRead(t, tests, func(r io.Reader) ([]byte, error) { var info fileInfo err := readGoInfo(r, &info) return info.header, err }) } var readEmbedTests = []struct { in, out string }{ { "package p\n", "", }, { "package p\nimport \"embed\"\nvar i int\n//go:embed x y z\nvar files embed.FS", `test:4:12:x test:4:14:y test:4:16:z`, }, { "package p\nimport \"embed\"\nvar i int\n//go:embed x \"\\x79\" `z`\nvar files embed.FS", `test:4:12:x test:4:14:y test:4:21:z`, }, { "package p\nimport \"embed\"\nvar i int\n//go:embed x y\n//go:embed z\nvar files embed.FS", `test:4:12:x test:4:14:y test:5:12:z`, }, { "package p\nimport \"embed\"\nvar i int\n\t //go:embed x y\n\t //go:embed z\n\t var files embed.FS", `test:4:14:x test:4:16:y test:5:14:z`, }, { "package p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS", `test:3:12:x test:3:14:y test:3:16:z`, }, { "\ufeffpackage p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS", `test:3:12:x test:3:14:y test:3:16:z`, }, { "package p\nimport \"embed\"\nvar s = \"/*\"\n//go:embed x\nvar files embed.FS", `test:4:12:x`, }, { `package p import "embed" var s = "\"\\\\" //go:embed x var files embed.FS`, `test:4:15:x`, }, { "package p\nimport \"embed\"\nvar s = `/*`\n//go:embed x\nvar files embed.FS", `test:4:12:x`, }, { "package p\nimport \"embed\"\nvar s = z/ *y\n//go:embed pointer\nvar pointer embed.FS", "test:4:12:pointer", }, { "package p\n//go:embed x y z\n", // no import, no scan "", }, { "package p\n//go:embed x y z\nvar files embed.FS", // no import, no scan "", }, { "\ufeffpackage p\n//go:embed x y z\nvar files embed.FS", // no import, no scan "", }, } func TestReadEmbed(t *testing.T) { fset := token.NewFileSet() for i, tt := range readEmbedTests { info := fileInfo{ name: "test", fset: fset, } err := readGoInfo(strings.NewReader(tt.in), &info) if err != nil { t.Errorf("#%d: %v", i, err) continue } b := &strings.Builder{} sep := "" for _, emb := range info.embeds { fmt.Fprintf(b, "%s%v:%s", sep, emb.pos, emb.pattern) sep = "\n" } got := b.String() want := strings.Join(strings.Fields(tt.out), "\n") if got != want { t.Errorf("#%d: embeds:\n%s\nwant:\n%s", i, got, want) } } }
go/src/go/build/read_test.go/0
{ "file_path": "go/src/go/build/read_test.go", "repo_id": "go", "token_count": 2933 }
245
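readGoInfo and the //go:embed scanning exercised in the record above are internal to go/build; the corresponding exported surface is package loading, which in Go 1.16 and later reports the collected patterns on the returned Package. A hedged sketch under the assumption that the current directory holds a buildable package:

package main

import (
	"fmt"
	"go/build"
)

func main() {
	pkg, err := build.ImportDir(".", 0)
	if err != nil {
		panic(err)
	}
	// Imports and EmbedPatterns are populated from the same header and
	// directive scan that read_test.go exercises directly.
	fmt.Println(pkg.Imports)
	fmt.Println(pkg.EmbedPatterns) // patterns from //go:embed in non-test files
}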
package doc
go/src/go/build/testdata/doc/c_test.go/0
{ "file_path": "go/src/go/build/testdata/doc/c_test.go", "repo_id": "go", "token_count": 3 }
246
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package constant implements Values representing untyped // Go constants and their corresponding operations. // // A special Unknown value may be used when a value // is unknown due to an error. Operations on unknown // values produce unknown values unless specified // otherwise. package constant import ( "fmt" "go/token" "math" "math/big" "math/bits" "strconv" "strings" "sync" "unicode/utf8" ) //go:generate stringer -type Kind // Kind specifies the kind of value represented by a [Value]. type Kind int const ( // unknown values Unknown Kind = iota // non-numeric values Bool String // numeric values Int Float Complex ) // A Value represents the value of a Go constant. type Value interface { // Kind returns the value kind. Kind() Kind // String returns a short, quoted (human-readable) form of the value. // For numeric values, the result may be an approximation; // for String values the result may be a shortened string. // Use ExactString for a string representing a value exactly. String() string // ExactString returns an exact, quoted (human-readable) form of the value. // If the Value is of Kind String, use StringVal to obtain the unquoted string. ExactString() string // Prevent external implementations. implementsValue() } // ---------------------------------------------------------------------------- // Implementations // Maximum supported mantissa precision. // The spec requires at least 256 bits; typical implementations use 512 bits. const prec = 512 // TODO(gri) Consider storing "error" information in an unknownVal so clients // can provide better error messages. For instance, if a number is // too large (incl. infinity), that could be recorded in unknownVal. // See also #20583 and #42695 for use cases. // Representation of values: // // Values of Int and Float Kind have two different representations each: int64Val // and intVal, and ratVal and floatVal. When possible, the "smaller", respectively // more precise (for Floats) representation is chosen. However, once a Float value // is represented as a floatVal, any subsequent results remain floatVals (unless // explicitly converted); i.e., no attempt is made to convert a floatVal back into // a ratVal. The reasoning is that all representations but floatVal are mathematically // exact, but once that precision is lost (by moving to floatVal), moving back to // a different representation implies a precision that's not actually there. type ( unknownVal struct{} boolVal bool stringVal struct { // Lazy value: either a string (l,r==nil) or an addition (l,r!=nil). 
mu sync.Mutex s string l, r *stringVal } int64Val int64 // Int values representable as an int64 intVal struct{ val *big.Int } // Int values not representable as an int64 ratVal struct{ val *big.Rat } // Float values representable as a fraction floatVal struct{ val *big.Float } // Float values not representable as a fraction complexVal struct{ re, im Value } ) func (unknownVal) Kind() Kind { return Unknown } func (boolVal) Kind() Kind { return Bool } func (*stringVal) Kind() Kind { return String } func (int64Val) Kind() Kind { return Int } func (intVal) Kind() Kind { return Int } func (ratVal) Kind() Kind { return Float } func (floatVal) Kind() Kind { return Float } func (complexVal) Kind() Kind { return Complex } func (unknownVal) String() string { return "unknown" } func (x boolVal) String() string { return strconv.FormatBool(bool(x)) } // String returns a possibly shortened quoted form of the String value. func (x *stringVal) String() string { const maxLen = 72 // a reasonable length s := strconv.Quote(x.string()) if utf8.RuneCountInString(s) > maxLen { // The string without the enclosing quotes is greater than maxLen-2 runes // long. Remove the last 3 runes (including the closing '"') by keeping // only the first maxLen-3 runes; then add "...". i := 0 for n := 0; n < maxLen-3; n++ { _, size := utf8.DecodeRuneInString(s[i:]) i += size } s = s[:i] + "..." } return s } // string constructs and returns the actual string literal value. // If x represents an addition, then it rewrites x to be a single // string, to speed future calls. This lazy construction avoids // building different string values for all subpieces of a large // concatenation. See golang.org/issue/23348. func (x *stringVal) string() string { x.mu.Lock() if x.l != nil { x.s = strings.Join(reverse(x.appendReverse(nil)), "") x.l = nil x.r = nil } s := x.s x.mu.Unlock() return s } // reverse reverses x in place and returns it. func reverse(x []string) []string { n := len(x) for i := 0; i+i < n; i++ { x[i], x[n-1-i] = x[n-1-i], x[i] } return x } // appendReverse appends to list all of x's subpieces, but in reverse, // and returns the result. Appending the reversal allows processing // the right side in a recursive call and the left side in a loop. // Because a chain like a + b + c + d + e is actually represented // as ((((a + b) + c) + d) + e), the left-side loop avoids deep recursion. // x must be locked. func (x *stringVal) appendReverse(list []string) []string { y := x for y.r != nil { y.r.mu.Lock() list = y.r.appendReverse(list) y.r.mu.Unlock() l := y.l if y != x { y.mu.Unlock() } l.mu.Lock() y = l } s := y.s if y != x { y.mu.Unlock() } return append(list, s) } func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) } func (x intVal) String() string { return x.val.String() } func (x ratVal) String() string { return rtof(x).String() } // String returns a decimal approximation of the Float value. func (x floatVal) String() string { f := x.val // Don't try to convert infinities (will not terminate). if f.IsInf() { return f.String() } // Use exact fmt formatting if in float64 range (common case): // proceed if f doesn't underflow to 0 or overflow to inf. if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) { s := fmt.Sprintf("%.6g", x) if !f.IsInt() && strings.IndexByte(s, '.') < 0 { // f is not an integer, but its string representation // doesn't reflect that. Use more digits. See issue 56220. s = fmt.Sprintf("%g", x) } return s } // Out of float64 range. 
Do approximate manual to decimal // conversion to avoid precise but possibly slow Float // formatting. // f = mant * 2**exp var mant big.Float exp := f.MantExp(&mant) // 0.5 <= |mant| < 1.0 // approximate float64 mantissa m and decimal exponent d // f ~ m * 10**d m, _ := mant.Float64() // 0.5 <= |m| < 1.0 d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2) // adjust m for truncated (integer) decimal exponent e e := int64(d) m *= math.Pow(10, d-float64(e)) // ensure 1 <= |m| < 10 switch am := math.Abs(m); { case am < 1-0.5e-6: // The %.6g format below rounds m to 5 digits after the // decimal point. Make sure that m*10 < 10 even after // rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e6. m *= 10 e-- case am >= 10: m /= 10 e++ } return fmt.Sprintf("%.6ge%+d", m, e) } func (x complexVal) String() string { return fmt.Sprintf("(%s + %si)", x.re, x.im) } func (x unknownVal) ExactString() string { return x.String() } func (x boolVal) ExactString() string { return x.String() } func (x *stringVal) ExactString() string { return strconv.Quote(x.string()) } func (x int64Val) ExactString() string { return x.String() } func (x intVal) ExactString() string { return x.String() } func (x ratVal) ExactString() string { r := x.val if r.IsInt() { return r.Num().String() } return r.String() } func (x floatVal) ExactString() string { return x.val.Text('p', 0) } func (x complexVal) ExactString() string { return fmt.Sprintf("(%s + %si)", x.re.ExactString(), x.im.ExactString()) } func (unknownVal) implementsValue() {} func (boolVal) implementsValue() {} func (*stringVal) implementsValue() {} func (int64Val) implementsValue() {} func (ratVal) implementsValue() {} func (intVal) implementsValue() {} func (floatVal) implementsValue() {} func (complexVal) implementsValue() {} func newInt() *big.Int { return new(big.Int) } func newRat() *big.Rat { return new(big.Rat) } func newFloat() *big.Float { return new(big.Float).SetPrec(prec) } func i64toi(x int64Val) intVal { return intVal{newInt().SetInt64(int64(x))} } func i64tor(x int64Val) ratVal { return ratVal{newRat().SetInt64(int64(x))} } func i64tof(x int64Val) floatVal { return floatVal{newFloat().SetInt64(int64(x))} } func itor(x intVal) ratVal { return ratVal{newRat().SetInt(x.val)} } func itof(x intVal) floatVal { return floatVal{newFloat().SetInt(x.val)} } func rtof(x ratVal) floatVal { return floatVal{newFloat().SetRat(x.val)} } func vtoc(x Value) complexVal { return complexVal{x, int64Val(0)} } func makeInt(x *big.Int) Value { if x.IsInt64() { return int64Val(x.Int64()) } return intVal{x} } func makeRat(x *big.Rat) Value { a := x.Num() b := x.Denom() if smallInt(a) && smallInt(b) { // ok to remain fraction return ratVal{x} } // components too large => switch to float return floatVal{newFloat().SetRat(x)} } var floatVal0 = floatVal{newFloat()} func makeFloat(x *big.Float) Value { // convert -0 if x.Sign() == 0 { return floatVal0 } if x.IsInf() { return unknownVal{} } // No attempt is made to "go back" to ratVal, even if possible, // to avoid providing the illusion of a mathematically exact // representation. return floatVal{x} } func makeComplex(re, im Value) Value { if re.Kind() == Unknown || im.Kind() == Unknown { return unknownVal{} } return complexVal{re, im} } func makeFloatFromLiteral(lit string) Value { if f, ok := newFloat().SetString(lit); ok { if smallFloat(f) { // ok to use rationals if f.Sign() == 0 { // Issue 20228: If the float underflowed to zero, parse just "0". 
// Otherwise, lit might contain a value with a large negative exponent, // such as -6e-1886451601. As a float, that will underflow to 0, // but it'll take forever to parse as a Rat. lit = "0" } if r, ok := newRat().SetString(lit); ok { return ratVal{r} } } // otherwise use floats return makeFloat(f) } return nil } // Permit fractions with component sizes up to maxExp // before switching to using floating-point numbers. const maxExp = 4 << 10 // smallInt reports whether x would lead to "reasonably"-sized fraction // if converted to a *big.Rat. func smallInt(x *big.Int) bool { return x.BitLen() < maxExp } // smallFloat64 reports whether x would lead to "reasonably"-sized fraction // if converted to a *big.Rat. func smallFloat64(x float64) bool { if math.IsInf(x, 0) { return false } _, e := math.Frexp(x) return -maxExp < e && e < maxExp } // smallFloat reports whether x would lead to "reasonably"-sized fraction // if converted to a *big.Rat. func smallFloat(x *big.Float) bool { if x.IsInf() { return false } e := x.MantExp(nil) return -maxExp < e && e < maxExp } // ---------------------------------------------------------------------------- // Factories // MakeUnknown returns the [Unknown] value. func MakeUnknown() Value { return unknownVal{} } // MakeBool returns the [Bool] value for b. func MakeBool(b bool) Value { return boolVal(b) } // MakeString returns the [String] value for s. func MakeString(s string) Value { if s == "" { return &emptyString // common case } return &stringVal{s: s} } var emptyString stringVal // MakeInt64 returns the [Int] value for x. func MakeInt64(x int64) Value { return int64Val(x) } // MakeUint64 returns the [Int] value for x. func MakeUint64(x uint64) Value { if x < 1<<63 { return int64Val(int64(x)) } return intVal{newInt().SetUint64(x)} } // MakeFloat64 returns the [Float] value for x. // If x is -0.0, the result is 0.0. // If x is not finite, the result is an [Unknown]. func MakeFloat64(x float64) Value { if math.IsInf(x, 0) || math.IsNaN(x) { return unknownVal{} } if smallFloat64(x) { return ratVal{newRat().SetFloat64(x + 0)} // convert -0 to 0 } return floatVal{newFloat().SetFloat64(x + 0)} } // MakeFromLiteral returns the corresponding integer, floating-point, // imaginary, character, or string value for a Go literal string. The // tok value must be one of [token.INT], [token.FLOAT], [token.IMAG], // [token.CHAR], or [token.STRING]. The final argument must be zero. // If the literal string syntax is invalid, the result is an [Unknown]. 
func MakeFromLiteral(lit string, tok token.Token, zero uint) Value { if zero != 0 { panic("MakeFromLiteral called with non-zero last argument") } switch tok { case token.INT: if x, err := strconv.ParseInt(lit, 0, 64); err == nil { return int64Val(x) } if x, ok := newInt().SetString(lit, 0); ok { return intVal{x} } case token.FLOAT: if x := makeFloatFromLiteral(lit); x != nil { return x } case token.IMAG: if n := len(lit); n > 0 && lit[n-1] == 'i' { if im := makeFloatFromLiteral(lit[:n-1]); im != nil { return makeComplex(int64Val(0), im) } } case token.CHAR: if n := len(lit); n >= 2 { if code, _, _, err := strconv.UnquoteChar(lit[1:n-1], '\''); err == nil { return MakeInt64(int64(code)) } } case token.STRING: if s, err := strconv.Unquote(lit); err == nil { return MakeString(s) } default: panic(fmt.Sprintf("%v is not a valid token", tok)) } return unknownVal{} } // ---------------------------------------------------------------------------- // Accessors // // For unknown arguments the result is the zero value for the respective // accessor type, except for Sign, where the result is 1. // BoolVal returns the Go boolean value of x, which must be a [Bool] or an [Unknown]. // If x is [Unknown], the result is false. func BoolVal(x Value) bool { switch x := x.(type) { case boolVal: return bool(x) case unknownVal: return false default: panic(fmt.Sprintf("%v not a Bool", x)) } } // StringVal returns the Go string value of x, which must be a [String] or an [Unknown]. // If x is [Unknown], the result is "". func StringVal(x Value) string { switch x := x.(type) { case *stringVal: return x.string() case unknownVal: return "" default: panic(fmt.Sprintf("%v not a String", x)) } } // Int64Val returns the Go int64 value of x and whether the result is exact; // x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined. // If x is [Unknown], the result is (0, false). func Int64Val(x Value) (int64, bool) { switch x := x.(type) { case int64Val: return int64(x), true case intVal: return x.val.Int64(), false // not an int64Val and thus not exact case unknownVal: return 0, false default: panic(fmt.Sprintf("%v not an Int", x)) } } // Uint64Val returns the Go uint64 value of x and whether the result is exact; // x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined. // If x is [Unknown], the result is (0, false). func Uint64Val(x Value) (uint64, bool) { switch x := x.(type) { case int64Val: return uint64(x), x >= 0 case intVal: return x.val.Uint64(), x.val.IsUint64() case unknownVal: return 0, false default: panic(fmt.Sprintf("%v not an Int", x)) } } // Float32Val is like [Float64Val] but for float32 instead of float64. func Float32Val(x Value) (float32, bool) { switch x := x.(type) { case int64Val: f := float32(x) return f, int64Val(f) == x case intVal: f, acc := newFloat().SetInt(x.val).Float32() return f, acc == big.Exact case ratVal: return x.val.Float32() case floatVal: f, acc := x.val.Float32() return f, acc == big.Exact case unknownVal: return 0, false default: panic(fmt.Sprintf("%v not a Float", x)) } } // Float64Val returns the nearest Go float64 value of x and whether the result is exact; // x must be numeric or an [Unknown], but not [Complex]. For values too small (too close to 0) // to represent as float64, [Float64Val] silently underflows to 0. The result sign always // matches the sign of x, even for 0. // If x is [Unknown], the result is (0, false). 
func Float64Val(x Value) (float64, bool) { switch x := x.(type) { case int64Val: f := float64(int64(x)) return f, int64Val(f) == x case intVal: f, acc := newFloat().SetInt(x.val).Float64() return f, acc == big.Exact case ratVal: return x.val.Float64() case floatVal: f, acc := x.val.Float64() return f, acc == big.Exact case unknownVal: return 0, false default: panic(fmt.Sprintf("%v not a Float", x)) } } // Val returns the underlying value for a given constant. Since it returns an // interface, it is up to the caller to type assert the result to the expected // type. The possible dynamic return types are: // // x Kind type of result // ----------------------------------------- // Bool bool // String string // Int int64 or *big.Int // Float *big.Float or *big.Rat // everything else nil func Val(x Value) any { switch x := x.(type) { case boolVal: return bool(x) case *stringVal: return x.string() case int64Val: return int64(x) case intVal: return x.val case ratVal: return x.val case floatVal: return x.val default: return nil } } // Make returns the [Value] for x. // // type of x result Kind // ---------------------------- // bool Bool // string String // int64 Int // *big.Int Int // *big.Float Float // *big.Rat Float // anything else Unknown func Make(x any) Value { switch x := x.(type) { case bool: return boolVal(x) case string: return &stringVal{s: x} case int64: return int64Val(x) case *big.Int: return makeInt(x) case *big.Rat: return makeRat(x) case *big.Float: return makeFloat(x) default: return unknownVal{} } } // BitLen returns the number of bits required to represent // the absolute value x in binary representation; x must be an [Int] or an [Unknown]. // If x is [Unknown], the result is 0. func BitLen(x Value) int { switch x := x.(type) { case int64Val: u := uint64(x) if x < 0 { u = uint64(-x) } return 64 - bits.LeadingZeros64(u) case intVal: return x.val.BitLen() case unknownVal: return 0 default: panic(fmt.Sprintf("%v not an Int", x)) } } // Sign returns -1, 0, or 1 depending on whether x < 0, x == 0, or x > 0; // x must be numeric or [Unknown]. For complex values x, the sign is 0 if x == 0, // otherwise it is != 0. If x is [Unknown], the result is 1. func Sign(x Value) int { switch x := x.(type) { case int64Val: switch { case x < 0: return -1 case x > 0: return 1 } return 0 case intVal: return x.val.Sign() case ratVal: return x.val.Sign() case floatVal: return x.val.Sign() case complexVal: return Sign(x.re) | Sign(x.im) case unknownVal: return 1 // avoid spurious division by zero errors default: panic(fmt.Sprintf("%v not numeric", x)) } } // ---------------------------------------------------------------------------- // Support for assembling/disassembling numeric values const ( // Compute the size of a Word in bytes. _m = ^big.Word(0) _log = _m>>8&1 + _m>>16&1 + _m>>32&1 wordSize = 1 << _log ) // Bytes returns the bytes for the absolute value of x in little- // endian binary representation; x must be an [Int]. func Bytes(x Value) []byte { var t intVal switch x := x.(type) { case int64Val: t = i64toi(x) case intVal: t = x default: panic(fmt.Sprintf("%v not an Int", x)) } words := t.val.Bits() bytes := make([]byte, len(words)*wordSize) i := 0 for _, w := range words { for j := 0; j < wordSize; j++ { bytes[i] = byte(w) w >>= 8 i++ } } // remove leading 0's for i > 0 && bytes[i-1] == 0 { i-- } return bytes[:i] } // MakeFromBytes returns the [Int] value given the bytes of its little-endian // binary representation. An empty byte slice argument represents 0. 
func MakeFromBytes(bytes []byte) Value { words := make([]big.Word, (len(bytes)+(wordSize-1))/wordSize) i := 0 var w big.Word var s uint for _, b := range bytes { w |= big.Word(b) << s if s += 8; s == wordSize*8 { words[i] = w i++ w = 0 s = 0 } } // store last word if i < len(words) { words[i] = w i++ } // remove leading 0's for i > 0 && words[i-1] == 0 { i-- } return makeInt(newInt().SetBits(words[:i])) } // Num returns the numerator of x; x must be [Int], [Float], or [Unknown]. // If x is [Unknown], or if it is too large or small to represent as a // fraction, the result is [Unknown]. Otherwise the result is an [Int] // with the same sign as x. func Num(x Value) Value { switch x := x.(type) { case int64Val, intVal: return x case ratVal: return makeInt(x.val.Num()) case floatVal: if smallFloat(x.val) { r, _ := x.val.Rat(nil) return makeInt(r.Num()) } case unknownVal: break default: panic(fmt.Sprintf("%v not Int or Float", x)) } return unknownVal{} } // Denom returns the denominator of x; x must be [Int], [Float], or [Unknown]. // If x is [Unknown], or if it is too large or small to represent as a // fraction, the result is [Unknown]. Otherwise the result is an [Int] >= 1. func Denom(x Value) Value { switch x := x.(type) { case int64Val, intVal: return int64Val(1) case ratVal: return makeInt(x.val.Denom()) case floatVal: if smallFloat(x.val) { r, _ := x.val.Rat(nil) return makeInt(r.Denom()) } case unknownVal: break default: panic(fmt.Sprintf("%v not Int or Float", x)) } return unknownVal{} } // MakeImag returns the [Complex] value x*i; // x must be [Int], [Float], or [Unknown]. // If x is [Unknown], the result is [Unknown]. func MakeImag(x Value) Value { switch x.(type) { case unknownVal: return x case int64Val, intVal, ratVal, floatVal: return makeComplex(int64Val(0), x) default: panic(fmt.Sprintf("%v not Int or Float", x)) } } // Real returns the real part of x, which must be a numeric or unknown value. // If x is [Unknown], the result is [Unknown]. func Real(x Value) Value { switch x := x.(type) { case unknownVal, int64Val, intVal, ratVal, floatVal: return x case complexVal: return x.re default: panic(fmt.Sprintf("%v not numeric", x)) } } // Imag returns the imaginary part of x, which must be a numeric or unknown value. // If x is [Unknown], the result is [Unknown]. func Imag(x Value) Value { switch x := x.(type) { case unknownVal: return x case int64Val, intVal, ratVal, floatVal: return int64Val(0) case complexVal: return x.im default: panic(fmt.Sprintf("%v not numeric", x)) } } // ---------------------------------------------------------------------------- // Numeric conversions // ToInt converts x to an [Int] value if x is representable as an [Int]. // Otherwise it returns an [Unknown]. func ToInt(x Value) Value { switch x := x.(type) { case int64Val, intVal: return x case ratVal: if x.val.IsInt() { return makeInt(x.val.Num()) } case floatVal: // avoid creation of huge integers // (Existing tests require permitting exponents of at least 1024; // allow any value that would also be permissible as a fraction.) if smallFloat(x.val) { i := newInt() if _, acc := x.val.Int(i); acc == big.Exact { return makeInt(i) } // If we can get an integer by rounding up or down, // assume x is not an integer because of rounding // errors in prior computations. 
const delta = 4 // a small number of bits > 0 var t big.Float t.SetPrec(prec - delta) // try rounding down a little t.SetMode(big.ToZero) t.Set(x.val) if _, acc := t.Int(i); acc == big.Exact { return makeInt(i) } // try rounding up a little t.SetMode(big.AwayFromZero) t.Set(x.val) if _, acc := t.Int(i); acc == big.Exact { return makeInt(i) } } case complexVal: if re := ToFloat(x); re.Kind() == Float { return ToInt(re) } } return unknownVal{} } // ToFloat converts x to a [Float] value if x is representable as a [Float]. // Otherwise it returns an [Unknown]. func ToFloat(x Value) Value { switch x := x.(type) { case int64Val: return i64tor(x) // x is always a small int case intVal: if smallInt(x.val) { return itor(x) } return itof(x) case ratVal, floatVal: return x case complexVal: if Sign(x.im) == 0 { return ToFloat(x.re) } } return unknownVal{} } // ToComplex converts x to a [Complex] value if x is representable as a [Complex]. // Otherwise it returns an [Unknown]. func ToComplex(x Value) Value { switch x := x.(type) { case int64Val, intVal, ratVal, floatVal: return vtoc(x) case complexVal: return x } return unknownVal{} } // ---------------------------------------------------------------------------- // Operations // is32bit reports whether x can be represented using 32 bits. func is32bit(x int64) bool { const s = 32 return -1<<(s-1) <= x && x <= 1<<(s-1)-1 } // is63bit reports whether x can be represented using 63 bits. func is63bit(x int64) bool { const s = 63 return -1<<(s-1) <= x && x <= 1<<(s-1)-1 } // UnaryOp returns the result of the unary expression op y. // The operation must be defined for the operand. // If prec > 0 it specifies the ^ (xor) result size in bits. // If y is [Unknown], the result is [Unknown]. func UnaryOp(op token.Token, y Value, prec uint) Value { switch op { case token.ADD: switch y.(type) { case unknownVal, int64Val, intVal, ratVal, floatVal, complexVal: return y } case token.SUB: switch y := y.(type) { case unknownVal: return y case int64Val: if z := -y; z != y { return z // no overflow } return makeInt(newInt().Neg(big.NewInt(int64(y)))) case intVal: return makeInt(newInt().Neg(y.val)) case ratVal: return makeRat(newRat().Neg(y.val)) case floatVal: return makeFloat(newFloat().Neg(y.val)) case complexVal: re := UnaryOp(token.SUB, y.re, 0) im := UnaryOp(token.SUB, y.im, 0) return makeComplex(re, im) } case token.XOR: z := newInt() switch y := y.(type) { case unknownVal: return y case int64Val: z.Not(big.NewInt(int64(y))) case intVal: z.Not(y.val) default: goto Error } // For unsigned types, the result will be negative and // thus "too large": We must limit the result precision // to the type's precision. if prec > 0 { z.AndNot(z, newInt().Lsh(big.NewInt(-1), prec)) // z &^= (-1)<<prec } return makeInt(z) case token.NOT: switch y := y.(type) { case unknownVal: return y case boolVal: return !y } } Error: panic(fmt.Sprintf("invalid unary operation %s%v", op, y)) } func ord(x Value) int { switch x.(type) { default: // force invalid value into "x position" in match // (don't panic here so that callers can provide a better error message) return -1 case unknownVal: return 0 case boolVal, *stringVal: return 1 case int64Val: return 2 case intVal: return 3 case ratVal: return 4 case floatVal: return 5 case complexVal: return 6 } } // match returns the matching representation (same type) with the // smallest complexity for two values x and y. If one of them is // numeric, both of them must be numeric. 
If one of them is Unknown // or invalid (say, nil) both results are that value. func match(x, y Value) (_, _ Value) { switch ox, oy := ord(x), ord(y); { case ox < oy: x, y = match0(x, y) case ox > oy: y, x = match0(y, x) } return x, y } // match0 must only be called by match. // Invariant: ord(x) < ord(y) func match0(x, y Value) (_, _ Value) { // Prefer to return the original x and y arguments when possible, // to avoid unnecessary heap allocations. switch y.(type) { case intVal: switch x1 := x.(type) { case int64Val: return i64toi(x1), y } case ratVal: switch x1 := x.(type) { case int64Val: return i64tor(x1), y case intVal: return itor(x1), y } case floatVal: switch x1 := x.(type) { case int64Val: return i64tof(x1), y case intVal: return itof(x1), y case ratVal: return rtof(x1), y } case complexVal: return vtoc(x), y } // force unknown and invalid values into "x position" in callers of match // (don't panic here so that callers can provide a better error message) return x, x } // BinaryOp returns the result of the binary expression x op y. // The operation must be defined for the operands. If one of the // operands is [Unknown], the result is [Unknown]. // BinaryOp doesn't handle comparisons or shifts; use [Compare] // or [Shift] instead. // // To force integer division of [Int] operands, use op == [token.QUO_ASSIGN] // instead of [token.QUO]; the result is guaranteed to be [Int] in this case. // Division by zero leads to a run-time panic. func BinaryOp(x_ Value, op token.Token, y_ Value) Value { x, y := match(x_, y_) switch x := x.(type) { case unknownVal: return x case boolVal: y := y.(boolVal) switch op { case token.LAND: return x && y case token.LOR: return x || y } case int64Val: a := int64(x) b := int64(y.(int64Val)) var c int64 switch op { case token.ADD: if !is63bit(a) || !is63bit(b) { return makeInt(newInt().Add(big.NewInt(a), big.NewInt(b))) } c = a + b case token.SUB: if !is63bit(a) || !is63bit(b) { return makeInt(newInt().Sub(big.NewInt(a), big.NewInt(b))) } c = a - b case token.MUL: if !is32bit(a) || !is32bit(b) { return makeInt(newInt().Mul(big.NewInt(a), big.NewInt(b))) } c = a * b case token.QUO: return makeRat(big.NewRat(a, b)) case token.QUO_ASSIGN: // force integer division c = a / b case token.REM: c = a % b case token.AND: c = a & b case token.OR: c = a | b case token.XOR: c = a ^ b case token.AND_NOT: c = a &^ b default: goto Error } return int64Val(c) case intVal: a := x.val b := y.(intVal).val c := newInt() switch op { case token.ADD: c.Add(a, b) case token.SUB: c.Sub(a, b) case token.MUL: c.Mul(a, b) case token.QUO: return makeRat(newRat().SetFrac(a, b)) case token.QUO_ASSIGN: // force integer division c.Quo(a, b) case token.REM: c.Rem(a, b) case token.AND: c.And(a, b) case token.OR: c.Or(a, b) case token.XOR: c.Xor(a, b) case token.AND_NOT: c.AndNot(a, b) default: goto Error } return makeInt(c) case ratVal: a := x.val b := y.(ratVal).val c := newRat() switch op { case token.ADD: c.Add(a, b) case token.SUB: c.Sub(a, b) case token.MUL: c.Mul(a, b) case token.QUO: c.Quo(a, b) default: goto Error } return makeRat(c) case floatVal: a := x.val b := y.(floatVal).val c := newFloat() switch op { case token.ADD: c.Add(a, b) case token.SUB: c.Sub(a, b) case token.MUL: c.Mul(a, b) case token.QUO: c.Quo(a, b) default: goto Error } return makeFloat(c) case complexVal: y := y.(complexVal) a, b := x.re, x.im c, d := y.re, y.im var re, im Value switch op { case token.ADD: // (a+c) + i(b+d) re = add(a, c) im = add(b, d) case token.SUB: // (a-c) + i(b-d) re = sub(a, c) im = sub(b, 
d) case token.MUL: // (ac-bd) + i(bc+ad) ac := mul(a, c) bd := mul(b, d) bc := mul(b, c) ad := mul(a, d) re = sub(ac, bd) im = add(bc, ad) case token.QUO: // (ac+bd)/s + i(bc-ad)/s, with s = cc + dd ac := mul(a, c) bd := mul(b, d) bc := mul(b, c) ad := mul(a, d) cc := mul(c, c) dd := mul(d, d) s := add(cc, dd) re = add(ac, bd) re = quo(re, s) im = sub(bc, ad) im = quo(im, s) default: goto Error } return makeComplex(re, im) case *stringVal: if op == token.ADD { return &stringVal{l: x, r: y.(*stringVal)} } } Error: panic(fmt.Sprintf("invalid binary operation %v %s %v", x_, op, y_)) } func add(x, y Value) Value { return BinaryOp(x, token.ADD, y) } func sub(x, y Value) Value { return BinaryOp(x, token.SUB, y) } func mul(x, y Value) Value { return BinaryOp(x, token.MUL, y) } func quo(x, y Value) Value { return BinaryOp(x, token.QUO, y) } // Shift returns the result of the shift expression x op s // with op == [token.SHL] or [token.SHR] (<< or >>). x must be // an [Int] or an [Unknown]. If x is [Unknown], the result is x. func Shift(x Value, op token.Token, s uint) Value { switch x := x.(type) { case unknownVal: return x case int64Val: if s == 0 { return x } switch op { case token.SHL: z := i64toi(x).val return makeInt(z.Lsh(z, s)) case token.SHR: return x >> s } case intVal: if s == 0 { return x } z := newInt() switch op { case token.SHL: return makeInt(z.Lsh(x.val, s)) case token.SHR: return makeInt(z.Rsh(x.val, s)) } } panic(fmt.Sprintf("invalid shift %v %s %d", x, op, s)) } func cmpZero(x int, op token.Token) bool { switch op { case token.EQL: return x == 0 case token.NEQ: return x != 0 case token.LSS: return x < 0 case token.LEQ: return x <= 0 case token.GTR: return x > 0 case token.GEQ: return x >= 0 } panic(fmt.Sprintf("invalid comparison %v %s 0", x, op)) } // Compare returns the result of the comparison x op y. // The comparison must be defined for the operands. // If one of the operands is [Unknown], the result is // false. func Compare(x_ Value, op token.Token, y_ Value) bool { x, y := match(x_, y_) switch x := x.(type) { case unknownVal: return false case boolVal: y := y.(boolVal) switch op { case token.EQL: return x == y case token.NEQ: return x != y } case int64Val: y := y.(int64Val) switch op { case token.EQL: return x == y case token.NEQ: return x != y case token.LSS: return x < y case token.LEQ: return x <= y case token.GTR: return x > y case token.GEQ: return x >= y } case intVal: return cmpZero(x.val.Cmp(y.(intVal).val), op) case ratVal: return cmpZero(x.val.Cmp(y.(ratVal).val), op) case floatVal: return cmpZero(x.val.Cmp(y.(floatVal).val), op) case complexVal: y := y.(complexVal) re := Compare(x.re, token.EQL, y.re) im := Compare(x.im, token.EQL, y.im) switch op { case token.EQL: return re && im case token.NEQ: return !re || !im } case *stringVal: xs := x.string() ys := y.(*stringVal).string() switch op { case token.EQL: return xs == ys case token.NEQ: return xs != ys case token.LSS: return xs < ys case token.LEQ: return xs <= ys case token.GTR: return xs > ys case token.GEQ: return xs >= ys } } panic(fmt.Sprintf("invalid comparison %v %s %v", x_, op, y_)) }
go/src/go/constant/value.go/0
{ "file_path": "go/src/go/constant/value.go", "repo_id": "go", "token_count": 13570 }
247
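value.go in the record above documents the layered Int/Float representations and the operator entry points (BinaryOp, Compare, Shift). A short usage sketch of that exported API; the literal values chosen are arbitrary:

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	x := constant.MakeFromLiteral("22", token.INT, 0)
	y := constant.MakeInt64(7)

	// token.QUO on Int operands yields an exact Float (a fraction), while
	// token.QUO_ASSIGN forces integer division and keeps the result an Int.
	frac := constant.BinaryOp(x, token.QUO, y)
	quo := constant.BinaryOp(x, token.QUO_ASSIGN, y)
	fmt.Println(frac.Kind(), frac.ExactString()) // Float 22/7
	fmt.Println(quo.Kind(), quo.ExactString())   // Int 3

	// Comparisons and shifts use their own entry points rather than BinaryOp.
	fmt.Println(constant.Compare(x, token.GTR, y))        // true
	fmt.Println(constant.Shift(y, token.SHL, 4).String()) // 112
}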
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package doc import ( "cmp" "fmt" "go/ast" "go/token" "internal/lazyregexp" "path" "slices" "strconv" "strings" "unicode" "unicode/utf8" ) // ---------------------------------------------------------------------------- // function/method sets // // Internally, we treat functions like methods and collect them in method sets. // A methodSet describes a set of methods. Entries where Decl == nil are conflict // entries (more than one method with the same name at the same embedding level). type methodSet map[string]*Func // recvString returns a string representation of recv of the form "T", "*T", // "T[A, ...]", "*T[A, ...]" or "BADRECV" (if not a proper receiver type). func recvString(recv ast.Expr) string { switch t := recv.(type) { case *ast.Ident: return t.Name case *ast.StarExpr: return "*" + recvString(t.X) case *ast.IndexExpr: // Generic type with one parameter. return fmt.Sprintf("%s[%s]", recvString(t.X), recvParam(t.Index)) case *ast.IndexListExpr: // Generic type with multiple parameters. if len(t.Indices) > 0 { var b strings.Builder b.WriteString(recvString(t.X)) b.WriteByte('[') b.WriteString(recvParam(t.Indices[0])) for _, e := range t.Indices[1:] { b.WriteString(", ") b.WriteString(recvParam(e)) } b.WriteByte(']') return b.String() } } return "BADRECV" } func recvParam(p ast.Expr) string { if id, ok := p.(*ast.Ident); ok { return id.Name } return "BADPARAM" } // set creates the corresponding Func for f and adds it to mset. // If there are multiple f's with the same name, set keeps the first // one with documentation; conflicts are ignored. The boolean // specifies whether to leave the AST untouched. func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) { name := f.Name.Name if g := mset[name]; g != nil && g.Doc != "" { // A function with the same name has already been registered; // since it has documentation, assume f is simply another // implementation and ignore it. This does not happen if the // caller is using go/build.ScanDir to determine the list of // files implementing a package. return } // function doesn't exist or has no documentation; use f recv := "" if f.Recv != nil { var typ ast.Expr // be careful in case of incorrect ASTs if list := f.Recv.List; len(list) == 1 { typ = list[0].Type } recv = recvString(typ) } mset[name] = &Func{ Doc: f.Doc.Text(), Name: name, Decl: f, Recv: recv, Orig: recv, } if !preserveAST { f.Doc = nil // doc consumed - remove from AST } } // add adds method m to the method set; m is ignored if the method set // already contains a method with the same name at the same or a higher // level than m. func (mset methodSet) add(m *Func) { old := mset[m.Name] if old == nil || m.Level < old.Level { mset[m.Name] = m return } if m.Level == old.Level { // conflict - mark it using a method with nil Decl mset[m.Name] = &Func{ Name: m.Name, Level: m.Level, } } } // ---------------------------------------------------------------------------- // Named types // baseTypeName returns the name of the base type of x (or "") // and whether the type is imported or not. 
func baseTypeName(x ast.Expr) (name string, imported bool) { switch t := x.(type) { case *ast.Ident: return t.Name, false case *ast.IndexExpr: return baseTypeName(t.X) case *ast.IndexListExpr: return baseTypeName(t.X) case *ast.SelectorExpr: if _, ok := t.X.(*ast.Ident); ok { // only possible for qualified type names; // assume type is imported return t.Sel.Name, true } case *ast.ParenExpr: return baseTypeName(t.X) case *ast.StarExpr: return baseTypeName(t.X) } return "", false } // An embeddedSet describes a set of embedded types. type embeddedSet map[*namedType]bool // A namedType represents a named unqualified (package local, or possibly // predeclared) type. The namedType for a type name is always found via // reader.lookupType. type namedType struct { doc string // doc comment for type name string // type name decl *ast.GenDecl // nil if declaration hasn't been seen yet isEmbedded bool // true if this type is embedded isStruct bool // true if this type is a struct embedded embeddedSet // true if the embedded type is a pointer // associated declarations values []*Value // consts and vars funcs methodSet methods methodSet } // ---------------------------------------------------------------------------- // AST reader // reader accumulates documentation for a single package. // It modifies the AST: Comments (declaration documentation) // that have been collected by the reader are set to nil // in the respective AST nodes so that they are not printed // twice (once when printing the documentation and once when // printing the corresponding AST node). type reader struct { mode Mode // package properties doc string // package documentation, if any filenames []string notes map[string][]*Note // imports imports map[string]int hasDotImp bool // if set, package contains a dot import importByName map[string]string // declarations values []*Value // consts and vars order int // sort order of const and var declarations (when we can't use a name) types map[string]*namedType funcs methodSet // support for package-local shadowing of predeclared types shadowedPredecl map[string]bool fixmap map[string][]*ast.InterfaceType } func (r *reader) isVisible(name string) bool { return r.mode&AllDecls != 0 || token.IsExported(name) } // lookupType returns the base type with the given name. // If the base type has not been encountered yet, a new // type with the given name but no associated declaration // is added to the type map. func (r *reader) lookupType(name string) *namedType { if name == "" || name == "_" { return nil // no type docs for anonymous types } if typ, found := r.types[name]; found { return typ } // type not found - add one without declaration typ := &namedType{ name: name, embedded: make(embeddedSet), funcs: make(methodSet), methods: make(methodSet), } r.types[name] = typ return typ } // recordAnonymousField registers fieldType as the type of an // anonymous field in the parent type. If the field is imported // (qualified name) or the parent is nil, the field is ignored. // The function returns the field name. func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) { fname, imp := baseTypeName(fieldType) if parent == nil || imp { return } if ftype := r.lookupType(fname); ftype != nil { ftype.isEmbedded = true _, ptr := fieldType.(*ast.StarExpr) parent.embedded[ftype] = ptr } return } func (r *reader) readDoc(comment *ast.CommentGroup) { // By convention there should be only one package comment // but collect all of them if there are more than one. 
text := comment.Text() if r.doc == "" { r.doc = text return } r.doc += "\n" + text } func (r *reader) remember(predecl string, typ *ast.InterfaceType) { if r.fixmap == nil { r.fixmap = make(map[string][]*ast.InterfaceType) } r.fixmap[predecl] = append(r.fixmap[predecl], typ) } func specNames(specs []ast.Spec) []string { names := make([]string, 0, len(specs)) // reasonable estimate for _, s := range specs { // s guaranteed to be an *ast.ValueSpec by readValue for _, ident := range s.(*ast.ValueSpec).Names { names = append(names, ident.Name) } } return names } // readValue processes a const or var declaration. func (r *reader) readValue(decl *ast.GenDecl) { // determine if decl should be associated with a type // Heuristic: For each typed entry, determine the type name, if any. // If there is exactly one type name that is sufficiently // frequent, associate the decl with the respective type. domName := "" domFreq := 0 prev := "" n := 0 for _, spec := range decl.Specs { s, ok := spec.(*ast.ValueSpec) if !ok { continue // should not happen, but be conservative } name := "" switch { case s.Type != nil: // a type is present; determine its name if n, imp := baseTypeName(s.Type); !imp { name = n } case decl.Tok == token.CONST && len(s.Values) == 0: // no type or value is present but we have a constant declaration; // use the previous type name (possibly the empty string) name = prev } if name != "" { // entry has a named type if domName != "" && domName != name { // more than one type name - do not associate // with any type domName = "" break } domName = name domFreq++ } prev = name n++ } // nothing to do w/o a legal declaration if n == 0 { return } // determine values list with which to associate the Value for this decl values := &r.values const threshold = 0.75 if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) { // typed entries are sufficiently frequent if typ := r.lookupType(domName); typ != nil { values = &typ.values // associate with that type } } *values = append(*values, &Value{ Doc: decl.Doc.Text(), Names: specNames(decl.Specs), Decl: decl, order: r.order, }) if r.mode&PreserveAST == 0 { decl.Doc = nil // doc consumed - remove from AST } // Note: It's important that the order used here is global because the cleanupTypes // methods may move values associated with types back into the global list. If the // order is list-specific, sorting is not deterministic because the same order value // may appear multiple times (was bug, found when fixing #16153). r.order++ } // fields returns a struct's fields or an interface's methods. func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) { var fields *ast.FieldList switch t := typ.(type) { case *ast.StructType: fields = t.Fields isStruct = true case *ast.InterfaceType: fields = t.Methods } if fields != nil { list = fields.List } return } // readType processes a type declaration. func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) { typ := r.lookupType(spec.Name.Name) if typ == nil { return // no name or blank name - ignore the type } // A type should be added at most once, so typ.decl // should be nil - if it is not, simply overwrite it. 
typ.decl = decl // compute documentation doc := spec.Doc if doc == nil { // no doc associated with the spec, use the declaration doc, if any doc = decl.Doc } if r.mode&PreserveAST == 0 { spec.Doc = nil // doc consumed - remove from AST decl.Doc = nil // doc consumed - remove from AST } typ.doc = doc.Text() // record anonymous fields (they may contribute methods) // (some fields may have been recorded already when filtering // exports, but that's ok) var list []*ast.Field list, typ.isStruct = fields(spec.Type) for _, field := range list { if len(field.Names) == 0 { r.recordAnonymousField(typ, field.Type) } } } // isPredeclared reports whether n denotes a predeclared type. func (r *reader) isPredeclared(n string) bool { return predeclaredTypes[n] && r.types[n] == nil } // readFunc processes a func or method declaration. func (r *reader) readFunc(fun *ast.FuncDecl) { // strip function body if requested. if r.mode&PreserveAST == 0 { fun.Body = nil } // associate methods with the receiver type, if any if fun.Recv != nil { // method if len(fun.Recv.List) == 0 { // should not happen (incorrect AST); (See issue 17788) // don't show this method return } recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type) if imp { // should not happen (incorrect AST); // don't show this method return } if typ := r.lookupType(recvTypeName); typ != nil { typ.methods.set(fun, r.mode&PreserveAST != 0) } // otherwise ignore the method // TODO(gri): There may be exported methods of non-exported types // that can be called because of exported values (consts, vars, or // function results) of that type. Could determine if that is the // case and then show those methods in an appropriate section. return } // Associate factory functions with the first visible result type, as long as // others are predeclared types. if fun.Type.Results.NumFields() >= 1 { var typ *namedType // type to associate the function with numResultTypes := 0 for _, res := range fun.Type.Results.List { factoryType := res.Type if t, ok := factoryType.(*ast.ArrayType); ok { // We consider functions that return slices or arrays of type // T (or pointers to T) as factory functions of T. factoryType = t.Elt } if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) { if lookupTypeParam(n, fun.Type.TypeParams) != nil { // Issue #49477: don't associate fun with its type parameter result. // A type parameter is not a defined type. continue } if t := r.lookupType(n); t != nil { typ = t numResultTypes++ if numResultTypes > 1 { break } } } } // If there is exactly one result type, // associate the function with that type. if numResultTypes == 1 { typ.funcs.set(fun, r.mode&PreserveAST != 0) return } } // just an ordinary function r.funcs.set(fun, r.mode&PreserveAST != 0) } // lookupTypeParam searches for type parameters named name within the tparams // field list, returning the relevant identifier if found, or nil if not. 
func lookupTypeParam(name string, tparams *ast.FieldList) *ast.Ident { if tparams == nil { return nil } for _, field := range tparams.List { for _, id := range field.Names { if id.Name == name { return id } } } return nil } var ( noteMarker = `([A-Z][A-Z]+)\(([^)]+)\):?` // MARKER(uid), MARKER at least 2 chars, uid at least 1 char noteMarkerRx = lazyregexp.New(`^[ \t]*` + noteMarker) // MARKER(uid) at text start noteCommentRx = lazyregexp.New(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start ) // clean replaces each sequence of space, \r, or \t characters // with a single space and removes any trailing and leading spaces. func clean(s string) string { var b []byte p := byte(' ') for i := 0; i < len(s); i++ { q := s[i] if q == '\r' || q == '\t' { q = ' ' } if q != ' ' || p != ' ' { b = append(b, q) p = q } } // remove trailing blank, if any if n := len(b); n > 0 && p == ' ' { b = b[0 : n-1] } return string(b) } // readNote collects a single note from a sequence of comments. func (r *reader) readNote(list []*ast.Comment) { text := (&ast.CommentGroup{List: list}).Text() if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil { // The note body starts after the marker. // We remove any formatting so that we don't // get spurious line breaks/indentation when // showing the TODO body. body := clean(text[m[1]:]) if body != "" { marker := text[m[2]:m[3]] r.notes[marker] = append(r.notes[marker], &Note{ Pos: list[0].Pos(), End: list[len(list)-1].End(), UID: text[m[4]:m[5]], Body: body, }) } } } // readNotes extracts notes from comments. // A note must start at the beginning of a comment with "MARKER(uid):" // and is followed by the note body (e.g., "// BUG(gri): fix this"). // The note ends at the end of the comment group or at the start of // another note in the same comment group, whichever comes first. func (r *reader) readNotes(comments []*ast.CommentGroup) { for _, group := range comments { i := -1 // comment index of most recent note start, valid if >= 0 list := group.List for j, c := range list { if noteCommentRx.MatchString(c.Text) { if i >= 0 { r.readNote(list[i:j]) } i = j } } if i >= 0 { r.readNote(list[i:]) } } } // readFile adds the AST for a source file to the reader. func (r *reader) readFile(src *ast.File) { // add package documentation if src.Doc != nil { r.readDoc(src.Doc) if r.mode&PreserveAST == 0 { src.Doc = nil // doc consumed - remove from AST } } // add all declarations but for functions which are processed in a separate pass for _, decl := range src.Decls { switch d := decl.(type) { case *ast.GenDecl: switch d.Tok { case token.IMPORT: // imports are handled individually for _, spec := range d.Specs { if s, ok := spec.(*ast.ImportSpec); ok { if import_, err := strconv.Unquote(s.Path.Value); err == nil { r.imports[import_] = 1 var name string if s.Name != nil { name = s.Name.Name if name == "." { r.hasDotImp = true } } if name != "." 
{ if name == "" { name = assumedPackageName(import_) } old, ok := r.importByName[name] if !ok { r.importByName[name] = import_ } else if old != import_ && old != "" { r.importByName[name] = "" // ambiguous } } } } } case token.CONST, token.VAR: // constants and variables are always handled as a group r.readValue(d) case token.TYPE: // types are handled individually if len(d.Specs) == 1 && !d.Lparen.IsValid() { // common case: single declaration w/o parentheses // (if a single declaration is parenthesized, // create a new fake declaration below, so that // go/doc type declarations always appear w/o // parentheses) if s, ok := d.Specs[0].(*ast.TypeSpec); ok { r.readType(d, s) } break } for _, spec := range d.Specs { if s, ok := spec.(*ast.TypeSpec); ok { // use an individual (possibly fake) declaration // for each type; this also ensures that each type // gets to (re-)use the declaration documentation // if there's none associated with the spec itself fake := &ast.GenDecl{ Doc: d.Doc, // don't use the existing TokPos because it // will lead to the wrong selection range for // the fake declaration if there are more // than one type in the group (this affects // src/cmd/godoc/godoc.go's posLink_urlFunc) TokPos: s.Pos(), Tok: token.TYPE, Specs: []ast.Spec{s}, } r.readType(fake, s) } } } } } // collect MARKER(...): annotations r.readNotes(src.Comments) if r.mode&PreserveAST == 0 { src.Comments = nil // consumed unassociated comments - remove from AST } } func (r *reader) readPackage(pkg *ast.Package, mode Mode) { // initialize reader r.filenames = make([]string, len(pkg.Files)) r.imports = make(map[string]int) r.mode = mode r.types = make(map[string]*namedType) r.funcs = make(methodSet) r.notes = make(map[string][]*Note) r.importByName = make(map[string]string) // sort package files before reading them so that the // result does not depend on map iteration order i := 0 for filename := range pkg.Files { r.filenames[i] = filename i++ } slices.Sort(r.filenames) // process files in sorted order for _, filename := range r.filenames { f := pkg.Files[filename] if mode&AllDecls == 0 { r.fileExports(f) } r.readFile(f) } for name, path := range r.importByName { if path == "" { delete(r.importByName, name) } } // process functions now that we have better type information for _, f := range pkg.Files { for _, decl := range f.Decls { if d, ok := decl.(*ast.FuncDecl); ok { r.readFunc(d) } } } } // ---------------------------------------------------------------------------- // Types func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func { if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 { return f // shouldn't happen, but be safe } // copy existing receiver field and set new type newField := *f.Decl.Recv.List[0] origPos := newField.Type.Pos() _, origRecvIsPtr := newField.Type.(*ast.StarExpr) newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName} var typ ast.Expr = newIdent if !embeddedIsPtr && origRecvIsPtr { newIdent.NamePos++ // '*' is one character typ = &ast.StarExpr{Star: origPos, X: newIdent} } newField.Type = typ // copy existing receiver field list and set new receiver field newFieldList := *f.Decl.Recv newFieldList.List = []*ast.Field{&newField} // copy existing function declaration and set new receiver field list newFuncDecl := *f.Decl newFuncDecl.Recv = &newFieldList // copy existing function documentation and set new declaration newF := *f newF.Decl = &newFuncDecl newF.Recv = recvString(typ) // the Orig field never changes 
newF.Level = level return &newF } // collectEmbeddedMethods collects the embedded methods of typ in mset. func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) { visited[typ] = true for embedded, isPtr := range typ.embedded { // Once an embedded type is embedded as a pointer type // all embedded types in those types are treated like // pointer types for the purpose of the receiver type // computation; i.e., embeddedIsPtr is sticky for this // embedding hierarchy. thisEmbeddedIsPtr := embeddedIsPtr || isPtr for _, m := range embedded.methods { // only top-level methods are embedded if m.Level == 0 { mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level)) } } if !visited[embedded] { r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited) } } delete(visited, typ) } // computeMethodSets determines the actual method sets for each type encountered. func (r *reader) computeMethodSets() { for _, t := range r.types { // collect embedded methods for t if t.isStruct { // struct r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet)) } else { // interface // TODO(gri) fix this } } // For any predeclared names that are declared locally, don't treat them as // exported fields anymore. for predecl := range r.shadowedPredecl { for _, ityp := range r.fixmap[predecl] { removeAnonymousField(predecl, ityp) } } } // cleanupTypes removes the association of functions and methods with // types that have no declaration. Instead, these functions and methods // are shown at the package level. It also removes types with missing // declarations or which are not visible. func (r *reader) cleanupTypes() { for _, t := range r.types { visible := r.isVisible(t.name) predeclared := predeclaredTypes[t.name] if t.decl == nil && (predeclared || visible && (t.isEmbedded || r.hasDotImp)) { // t.name is a predeclared type (and was not redeclared in this package), // or it was embedded somewhere but its declaration is missing (because // the AST is incomplete), or we have a dot-import (and all bets are off): // move any associated values, funcs, and methods back to the top-level so // that they are not lost. // 1) move values r.values = append(r.values, t.values...) // 2) move factory functions for name, f := range t.funcs { // in a correct AST, package-level function names // are all different - no need to check for conflicts r.funcs[name] = f } // 3) move methods if !predeclared { for name, m := range t.methods { // don't overwrite functions with the same name - drop them if _, found := r.funcs[name]; !found { r.funcs[name] = m } } } } // remove types w/o declaration or which are not visible if t.decl == nil || !visible { delete(r.types, t.name) } } } // ---------------------------------------------------------------------------- // Sorting func sortedKeys(m map[string]int) []string { list := make([]string, len(m)) i := 0 for key := range m { list[i] = key i++ } slices.Sort(list) return list } // sortingName returns the name to use when sorting d into place. 
func sortingName(d *ast.GenDecl) string { if len(d.Specs) == 1 { if s, ok := d.Specs[0].(*ast.ValueSpec); ok { return s.Names[0].Name } } return "" } func sortedValues(m []*Value, tok token.Token) []*Value { list := make([]*Value, len(m)) // big enough in any case i := 0 for _, val := range m { if val.Decl.Tok == tok { list[i] = val i++ } } list = list[0:i] slices.SortFunc(list, func(a, b *Value) int { r := strings.Compare(sortingName(a.Decl), sortingName(b.Decl)) if r != 0 { return r } return cmp.Compare(a.order, b.order) }) return list } func sortedTypes(m map[string]*namedType, allMethods bool) []*Type { list := make([]*Type, len(m)) i := 0 for _, t := range m { list[i] = &Type{ Doc: t.doc, Name: t.name, Decl: t.decl, Consts: sortedValues(t.values, token.CONST), Vars: sortedValues(t.values, token.VAR), Funcs: sortedFuncs(t.funcs, true), Methods: sortedFuncs(t.methods, allMethods), } i++ } slices.SortFunc(list, func(a, b *Type) int { return strings.Compare(a.Name, b.Name) }) return list } func removeStar(s string) string { if len(s) > 0 && s[0] == '*' { return s[1:] } return s } func sortedFuncs(m methodSet, allMethods bool) []*Func { list := make([]*Func, len(m)) i := 0 for _, m := range m { // determine which methods to include switch { case m.Decl == nil: // exclude conflict entry case allMethods, m.Level == 0, !token.IsExported(removeStar(m.Orig)): // forced inclusion, method not embedded, or method // embedded but original receiver type not exported list[i] = m i++ } } list = list[0:i] slices.SortFunc(list, func(a, b *Func) int { return strings.Compare(a.Name, b.Name) }) return list } // noteBodies returns a list of note body strings given a list of notes. // This is only used to populate the deprecated Package.Bugs field. func noteBodies(notes []*Note) []string { var list []string for _, n := range notes { list = append(list, n.Body) } return list } // ---------------------------------------------------------------------------- // Predeclared identifiers // IsPredeclared reports whether s is a predeclared identifier. func IsPredeclared(s string) bool { return predeclaredTypes[s] || predeclaredFuncs[s] || predeclaredConstants[s] } var predeclaredTypes = map[string]bool{ "any": true, "bool": true, "byte": true, "comparable": true, "complex64": true, "complex128": true, "error": true, "float32": true, "float64": true, "int": true, "int8": true, "int16": true, "int32": true, "int64": true, "rune": true, "string": true, "uint": true, "uint8": true, "uint16": true, "uint32": true, "uint64": true, "uintptr": true, } var predeclaredFuncs = map[string]bool{ "append": true, "cap": true, "clear": true, "close": true, "complex": true, "copy": true, "delete": true, "imag": true, "len": true, "make": true, "max": true, "min": true, "new": true, "panic": true, "print": true, "println": true, "real": true, "recover": true, } var predeclaredConstants = map[string]bool{ "false": true, "iota": true, "nil": true, "true": true, } // assumedPackageName returns the assumed package name // for a given import path. This is a copy of // golang.org/x/tools/internal/imports.ImportPathToAssumedName. func assumedPackageName(importPath string) string { notIdentifier := func(ch rune) bool { return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || '0' <= ch && ch <= '9' || ch == '_' || ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) } base := path.Base(importPath) if strings.HasPrefix(base, "v") { if _, err := strconv.Atoi(base[1:]); err == nil { dir := path.Dir(importPath) if dir != "." 
{ base = path.Base(dir) } } } base = strings.TrimPrefix(base, "go-") if i := strings.IndexFunc(base, notIdentifier); i >= 0 { base = base[:i] } return base }
go/src/go/doc/reader.go/0
{ "file_path": "go/src/go/doc/reader.go", "repo_id": "go", "token_count": 10551 }
248
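// Illustrative sketch only (not part of reader.go above): the MARKER(uid)
// note collection implemented by readNote/readNotes surfaces through the
// public go/doc API as Package.Notes. The source text and import path
// below are invented for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

func main() {
	const src = `// Package demo is a tiny example.
package demo

// BUG(gri): this marker comment becomes a note with UID "gri".
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	p, err := doc.NewFromFiles(fset, []*ast.File{f}, "example.com/demo")
	if err != nil {
		panic(err)
	}
	for marker, notes := range p.Notes {
		for _, n := range notes {
			fmt.Printf("%s(%s): %s\n", marker, n.UID, n.Body) // BUG(gri): this marker comment ...
		}
	}
}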
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package blank is a go/doc test for the handling of _. // See issue 5397. package blank import "os" type T int // T constants counting from a blank constant. const ( _ T = iota T1 T2 ) // T constants counting from unexported constants. const ( tweedledee T = iota tweedledum C1 C2 alice C3 redQueen int = iota C4 ) // Constants with a single type that is not propagated. const ( zero os.FileMode = 0 Default = 0644 Useless = 0312 WideOpen = 0777 ) // Constants with an imported type that is propagated. const ( zero os.FileMode = 0 M1 M2 M3 ) // Package constants. const ( _ int = iota I1 I2 ) // Unexported constants counting from blank iota. // See issue 9615. const ( _ = iota one = iota + 1 ) // Blanks not in doc output: // S has a padding field. type S struct { H uint32 _ uint8 A uint8 } func _() {} type _ T var _ = T(55)
go/src/go/doc/testdata/blank.go/0
{ "file_path": "go/src/go/doc/testdata/blank.go", "repo_id": "go", "token_count": 428 }
249
// Package issue12839 is a go/doc test to test association of a ... PACKAGE issue12839 IMPORTPATH testdata/issue12839 IMPORTS p FILENAMES testdata/issue12839.go FUNCTIONS // F1 should not be associated with T1 func F1() (*T1, *T2) // F10 should not be associated with T1. func F10() (T1, T2, error) // F4 should not be associated with a type (same as F1) func F4() (a T1, b T2) // F9 should not be associated with T1. func F9() (int, T1, T2) TYPES // type T1 struct{} // F2 should be associated with T1 func F2() (a, b, c T1) // F3 should be associated with T1 because b.T3 is from a ... func F3() (a T1, b p.T3) // F5 should be associated with T1. func F5() (T1, error) // F6 should be associated with T1. func F6() (*T1, error) // F7 should be associated with T1. func F7() (T1, string) // F8 should be associated with T1. func F8() (int, T1, string) // type T2 struct{}
go/src/go/doc/testdata/issue12839.0.golden/0
{ "file_path": "go/src/go/doc/testdata/issue12839.0.golden", "repo_id": "go", "token_count": 381 }
250
// PACKAGE issue22856 IMPORTPATH testdata/issue22856 FILENAMES testdata/issue22856.go FUNCTIONS // NewPointerSliceOfSlice is not a factory function because slices ... func NewPointerSliceOfSlice() [][]*T // NewSlice3 is not a factory function because 3 nested slices of ... func NewSlice3() [][][]T // NewSliceOfSlice is not a factory function because slices of a ... func NewSliceOfSlice() [][]T TYPES // type T struct{} // func New() T // func NewArray() [1]T // func NewPointer() *T // func NewPointerArray() [1]*T // func NewPointerOfPointer() **T // func NewPointerSlice() []*T // func NewSlice() []T
go/src/go/doc/testdata/issue22856.0.golden/0
{ "file_path": "go/src/go/doc/testdata/issue22856.0.golden", "repo_id": "go", "token_count": 259 }
251
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package format implements standard formatting of Go source. // // Note that formatting of Go source code changes over time, so tools relying on // consistent formatting should execute a specific version of the gofmt binary // instead of using this package. That way, the formatting will be stable, and // the tools won't need to be recompiled each time gofmt changes. // // For example, pre-submit checks that use this package directly would behave // differently depending on what Go version each developer uses, causing the // check to be inherently fragile. package format import ( "bytes" "fmt" "go/ast" "go/parser" "go/printer" "go/token" "io" ) // Keep these in sync with cmd/gofmt/gofmt.go. const ( tabWidth = 8 printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers // printerNormalizeNumbers means to canonicalize number literal prefixes // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. // // This value is defined in go/printer specifically for go/format and cmd/gofmt. printerNormalizeNumbers = 1 << 30 ) var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth} const parserMode = parser.ParseComments | parser.SkipObjectResolution // Node formats node in canonical gofmt style and writes the result to dst. // // The node type must be *[ast.File], *[printer.CommentedNode], [][ast.Decl], // [][ast.Stmt], or assignment-compatible to [ast.Expr], [ast.Decl], [ast.Spec], // or [ast.Stmt]. Node does not modify node. Imports are not sorted for // nodes representing partial source files (for instance, if the node is // not an *[ast.File] or a *[printer.CommentedNode] not wrapping an *[ast.File]). // // The function may return early (before the entire result is written) // and return a formatting error, for instance due to an incorrect AST. func Node(dst io.Writer, fset *token.FileSet, node any) error { // Determine if we have a complete source file (file != nil). var file *ast.File var cnode *printer.CommentedNode switch n := node.(type) { case *ast.File: file = n case *printer.CommentedNode: if f, ok := n.Node.(*ast.File); ok { file = f cnode = n } } // Sort imports if necessary. if file != nil && hasUnsortedImports(file) { // Make a copy of the AST because ast.SortImports is destructive. // TODO(gri) Do this more efficiently. var buf bytes.Buffer err := config.Fprint(&buf, fset, file) if err != nil { return err } file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode) if err != nil { // We should never get here. If we do, provide good diagnostic. return fmt.Errorf("format.Node internal error (%s)", err) } ast.SortImports(fset, file) // Use new file with sorted imports. node = file if cnode != nil { node = &printer.CommentedNode{Node: file, Comments: cnode.Comments} } } return config.Fprint(dst, fset, node) } // Source formats src in canonical gofmt style and returns the result // or an (I/O or syntax) error. src is expected to be a syntactically // correct Go source file, or a list of Go declarations or statements. // // If src is a partial source file, the leading and trailing space of src // is applied to the result (such that it has the same leading and trailing // space as src), and the result is indented by the same amount as the first // line of src containing code. Imports are not sorted for partial source files. 
func Source(src []byte) ([]byte, error) { fset := token.NewFileSet() file, sourceAdj, indentAdj, err := parse(fset, "", src, true) if err != nil { return nil, err } if sourceAdj == nil { // Complete source file. // TODO(gri) consider doing this always. ast.SortImports(fset, file) } return format(fset, file, sourceAdj, indentAdj, src, config) } func hasUnsortedImports(file *ast.File) bool { for _, d := range file.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { // Not an import declaration, so we're done. // Imports are always first. return false } if d.Lparen.IsValid() { // For now assume all grouped imports are unsorted. // TODO(gri) Should check if they are sorted already. return true } // Ungrouped imports are sorted by default. } return false }
go/src/go/format/format.go/0
{ "file_path": "go/src/go/format/format.go", "repo_id": "go", "token_count": 1419 }
252
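// A minimal usage sketch for the Source function documented above; the
// unformatted input is an arbitrary example, not taken from this package.
package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte("package main\nimport \"fmt\"\nfunc main(){fmt.Println( 42 )}\n")
	out, err := format.Source(src) // src must be syntactically valid Go
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // canonical gofmt-style output
}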
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package parser import ( "fmt" "go/ast" "go/token" "strings" ) const debugResolve = false // resolveFile walks the given file to resolve identifiers within the file // scope, updating ast.Ident.Obj fields with declaration information. // // If declErr is non-nil, it is used to report declaration errors during // resolution. tok is used to format position in error messages. func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, string)) { pkgScope := ast.NewScope(nil) r := &resolver{ handle: handle, declErr: declErr, topScope: pkgScope, pkgScope: pkgScope, depth: 1, } for _, decl := range file.Decls { ast.Walk(r, decl) } r.closeScope() assert(r.topScope == nil, "unbalanced scopes") assert(r.labelScope == nil, "unbalanced label scopes") // resolve global identifiers within the same file i := 0 for _, ident := range r.unresolved { // i <= index for current ident assert(ident.Obj == unresolved, "object already resolved") ident.Obj = r.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel if ident.Obj == nil { r.unresolved[i] = ident i++ } else if debugResolve { pos := ident.Obj.Decl.(interface{ Pos() token.Pos }).Pos() r.trace("resolved %s@%v to package object %v", ident.Name, ident.Pos(), pos) } } file.Scope = r.pkgScope file.Unresolved = r.unresolved[0:i] } const maxScopeDepth int = 1e3 type resolver struct { handle *token.File declErr func(token.Pos, string) // Ordinary identifier scopes pkgScope *ast.Scope // pkgScope.Outer == nil topScope *ast.Scope // top-most scope; may be pkgScope unresolved []*ast.Ident // unresolved identifiers depth int // scope depth // Label scopes // (maintained by open/close LabelScope) labelScope *ast.Scope // label scope for current function targetStack [][]*ast.Ident // stack of unresolved labels } func (r *resolver) trace(format string, args ...any) { fmt.Println(strings.Repeat(". ", r.depth) + r.sprintf(format, args...)) } func (r *resolver) sprintf(format string, args ...any) string { for i, arg := range args { switch arg := arg.(type) { case token.Pos: args[i] = r.handle.Position(arg) } } return fmt.Sprintf(format, args...) 
} func (r *resolver) openScope(pos token.Pos) { r.depth++ if r.depth > maxScopeDepth { panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"}) } if debugResolve { r.trace("opening scope @%v", pos) } r.topScope = ast.NewScope(r.topScope) } func (r *resolver) closeScope() { r.depth-- if debugResolve { r.trace("closing scope") } r.topScope = r.topScope.Outer } func (r *resolver) openLabelScope() { r.labelScope = ast.NewScope(r.labelScope) r.targetStack = append(r.targetStack, nil) } func (r *resolver) closeLabelScope() { // resolve labels n := len(r.targetStack) - 1 scope := r.labelScope for _, ident := range r.targetStack[n] { ident.Obj = scope.Lookup(ident.Name) if ident.Obj == nil && r.declErr != nil { r.declErr(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name)) } } // pop label scope r.targetStack = r.targetStack[0:n] r.labelScope = r.labelScope.Outer } func (r *resolver) declare(decl, data any, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) { for _, ident := range idents { if ident.Obj != nil { panic(fmt.Sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name)) } obj := ast.NewObj(kind, ident.Name) // remember the corresponding declaration for redeclaration // errors and global variable resolution/typechecking phase obj.Decl = decl obj.Data = data // Identifiers (for receiver type parameters) are written to the scope, but // never set as the resolved object. See go.dev/issue/50956. if _, ok := decl.(*ast.Ident); !ok { ident.Obj = obj } if ident.Name != "_" { if debugResolve { r.trace("declaring %s@%v", ident.Name, ident.Pos()) } if alt := scope.Insert(obj); alt != nil && r.declErr != nil { prevDecl := "" if pos := alt.Pos(); pos.IsValid() { prevDecl = r.sprintf("\n\tprevious declaration at %v", pos) } r.declErr(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl)) } } } } func (r *resolver) shortVarDecl(decl *ast.AssignStmt) { // Go spec: A short variable declaration may redeclare variables // provided they were originally declared in the same block with // the same type, and at least one of the non-blank variables is new. n := 0 // number of new variables for _, x := range decl.Lhs { if ident, isIdent := x.(*ast.Ident); isIdent { assert(ident.Obj == nil, "identifier already declared or resolved") obj := ast.NewObj(ast.Var, ident.Name) // remember corresponding assignment for other tools obj.Decl = decl ident.Obj = obj if ident.Name != "_" { if debugResolve { r.trace("declaring %s@%v", ident.Name, ident.Pos()) } if alt := r.topScope.Insert(obj); alt != nil { ident.Obj = alt // redeclaration } else { n++ // new declaration } } } } if n == 0 && r.declErr != nil { r.declErr(decl.Lhs[0].Pos(), "no new variables on left side of :=") } } // The unresolved object is a sentinel to mark identifiers that have been added // to the list of unresolved identifiers. The sentinel is only used for verifying // internal consistency. var unresolved = new(ast.Object) // If x is an identifier, resolve attempts to resolve x by looking up // the object it denotes. If no object is found and collectUnresolved is // set, x is marked as unresolved and collected in the list of unresolved // identifiers. func (r *resolver) resolve(ident *ast.Ident, collectUnresolved bool) { if ident.Obj != nil { panic(r.sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name)) } // '_' should never refer to existing declarations, because it has special // handling in the spec. 
if ident.Name == "_" { return } for s := r.topScope; s != nil; s = s.Outer { if obj := s.Lookup(ident.Name); obj != nil { if debugResolve { r.trace("resolved %v:%s to %v", ident.Pos(), ident.Name, obj) } assert(obj.Name != "", "obj with no name") // Identifiers (for receiver type parameters) are written to the scope, // but never set as the resolved object. See go.dev/issue/50956. if _, ok := obj.Decl.(*ast.Ident); !ok { ident.Obj = obj } return } } // all local scopes are known, so any unresolved identifier // must be found either in the file scope, package scope // (perhaps in another file), or universe scope --- collect // them so that they can be resolved later if collectUnresolved { ident.Obj = unresolved r.unresolved = append(r.unresolved, ident) } } func (r *resolver) walkExprs(list []ast.Expr) { for _, node := range list { ast.Walk(r, node) } } func (r *resolver) walkLHS(list []ast.Expr) { for _, expr := range list { expr := ast.Unparen(expr) if _, ok := expr.(*ast.Ident); !ok && expr != nil { ast.Walk(r, expr) } } } func (r *resolver) walkStmts(list []ast.Stmt) { for _, stmt := range list { ast.Walk(r, stmt) } } func (r *resolver) Visit(node ast.Node) ast.Visitor { if debugResolve && node != nil { r.trace("node %T@%v", node, node.Pos()) } switch n := node.(type) { // Expressions. case *ast.Ident: r.resolve(n, true) case *ast.FuncLit: r.openScope(n.Pos()) defer r.closeScope() r.walkFuncType(n.Type) r.walkBody(n.Body) case *ast.SelectorExpr: ast.Walk(r, n.X) // Note: don't try to resolve n.Sel, as we don't support qualified // resolution. case *ast.StructType: r.openScope(n.Pos()) defer r.closeScope() r.walkFieldList(n.Fields, ast.Var) case *ast.FuncType: r.openScope(n.Pos()) defer r.closeScope() r.walkFuncType(n) case *ast.CompositeLit: if n.Type != nil { ast.Walk(r, n.Type) } for _, e := range n.Elts { if kv, _ := e.(*ast.KeyValueExpr); kv != nil { // See go.dev/issue/45160: try to resolve composite lit keys, but don't // collect them as unresolved if resolution failed. This replicates // existing behavior when resolving during parsing. if ident, _ := kv.Key.(*ast.Ident); ident != nil { r.resolve(ident, false) } else { ast.Walk(r, kv.Key) } ast.Walk(r, kv.Value) } else { ast.Walk(r, e) } } case *ast.InterfaceType: r.openScope(n.Pos()) defer r.closeScope() r.walkFieldList(n.Methods, ast.Fun) // Statements case *ast.LabeledStmt: r.declare(n, nil, r.labelScope, ast.Lbl, n.Label) ast.Walk(r, n.Stmt) case *ast.AssignStmt: r.walkExprs(n.Rhs) if n.Tok == token.DEFINE { r.shortVarDecl(n) } else { r.walkExprs(n.Lhs) } case *ast.BranchStmt: // add to list of unresolved targets if n.Tok != token.FALLTHROUGH && n.Label != nil { depth := len(r.targetStack) - 1 r.targetStack[depth] = append(r.targetStack[depth], n.Label) } case *ast.BlockStmt: r.openScope(n.Pos()) defer r.closeScope() r.walkStmts(n.List) case *ast.IfStmt: r.openScope(n.Pos()) defer r.closeScope() if n.Init != nil { ast.Walk(r, n.Init) } ast.Walk(r, n.Cond) ast.Walk(r, n.Body) if n.Else != nil { ast.Walk(r, n.Else) } case *ast.CaseClause: r.walkExprs(n.List) r.openScope(n.Pos()) defer r.closeScope() r.walkStmts(n.Body) case *ast.SwitchStmt: r.openScope(n.Pos()) defer r.closeScope() if n.Init != nil { ast.Walk(r, n.Init) } if n.Tag != nil { // The scope below reproduces some unnecessary behavior of the parser, // opening an extra scope in case this is a type switch. It's not needed // for expression switches. // TODO: remove this once we've matched the parser resolution exactly. 
if n.Init != nil { r.openScope(n.Tag.Pos()) defer r.closeScope() } ast.Walk(r, n.Tag) } if n.Body != nil { r.walkStmts(n.Body.List) } case *ast.TypeSwitchStmt: if n.Init != nil { r.openScope(n.Pos()) defer r.closeScope() ast.Walk(r, n.Init) } r.openScope(n.Assign.Pos()) defer r.closeScope() ast.Walk(r, n.Assign) // s.Body consists only of case clauses, so does not get its own // scope. if n.Body != nil { r.walkStmts(n.Body.List) } case *ast.CommClause: r.openScope(n.Pos()) defer r.closeScope() if n.Comm != nil { ast.Walk(r, n.Comm) } r.walkStmts(n.Body) case *ast.SelectStmt: // as for switch statements, select statement bodies don't get their own // scope. if n.Body != nil { r.walkStmts(n.Body.List) } case *ast.ForStmt: r.openScope(n.Pos()) defer r.closeScope() if n.Init != nil { ast.Walk(r, n.Init) } if n.Cond != nil { ast.Walk(r, n.Cond) } if n.Post != nil { ast.Walk(r, n.Post) } ast.Walk(r, n.Body) case *ast.RangeStmt: r.openScope(n.Pos()) defer r.closeScope() ast.Walk(r, n.X) var lhs []ast.Expr if n.Key != nil { lhs = append(lhs, n.Key) } if n.Value != nil { lhs = append(lhs, n.Value) } if len(lhs) > 0 { if n.Tok == token.DEFINE { // Note: we can't exactly match the behavior of object resolution // during the parsing pass here, as it uses the position of the RANGE // token for the RHS OpPos. That information is not contained within // the AST. as := &ast.AssignStmt{ Lhs: lhs, Tok: token.DEFINE, TokPos: n.TokPos, Rhs: []ast.Expr{&ast.UnaryExpr{Op: token.RANGE, X: n.X}}, } // TODO(rFindley): this walkLHS reproduced the parser resolution, but // is it necessary? By comparison, for a normal AssignStmt we don't // walk the LHS in case there is an invalid identifier list. r.walkLHS(lhs) r.shortVarDecl(as) } else { r.walkExprs(lhs) } } ast.Walk(r, n.Body) // Declarations case *ast.GenDecl: switch n.Tok { case token.CONST, token.VAR: for i, spec := range n.Specs { spec := spec.(*ast.ValueSpec) kind := ast.Con if n.Tok == token.VAR { kind = ast.Var } r.walkExprs(spec.Values) if spec.Type != nil { ast.Walk(r, spec.Type) } r.declare(spec, i, r.topScope, kind, spec.Names...) } case token.TYPE: for _, spec := range n.Specs { spec := spec.(*ast.TypeSpec) // Go spec: The scope of a type identifier declared inside a function begins // at the identifier in the TypeSpec and ends at the end of the innermost // containing block. r.declare(spec, nil, r.topScope, ast.Typ, spec.Name) if spec.TypeParams != nil { r.openScope(spec.Pos()) defer r.closeScope() r.walkTParams(spec.TypeParams) } ast.Walk(r, spec.Type) } } case *ast.FuncDecl: // Open the function scope. r.openScope(n.Pos()) defer r.closeScope() r.walkRecv(n.Recv) // Type parameters are walked normally: they can reference each other, and // can be referenced by normal parameters. if n.Type.TypeParams != nil { r.walkTParams(n.Type.TypeParams) // TODO(rFindley): need to address receiver type parameters. } // Resolve and declare parameters in a specific order to get duplicate // declaration errors in the correct location. r.resolveList(n.Type.Params) r.resolveList(n.Type.Results) r.declareList(n.Recv, ast.Var) r.declareList(n.Type.Params, ast.Var) r.declareList(n.Type.Results, ast.Var) r.walkBody(n.Body) if n.Recv == nil && n.Name.Name != "init" { r.declare(n, nil, r.pkgScope, ast.Fun, n.Name) } default: return r } return nil } func (r *resolver) walkFuncType(typ *ast.FuncType) { // typ.TypeParams must be walked separately for FuncDecls. 
r.resolveList(typ.Params) r.resolveList(typ.Results) r.declareList(typ.Params, ast.Var) r.declareList(typ.Results, ast.Var) } func (r *resolver) resolveList(list *ast.FieldList) { if list == nil { return } for _, f := range list.List { if f.Type != nil { ast.Walk(r, f.Type) } } } func (r *resolver) declareList(list *ast.FieldList, kind ast.ObjKind) { if list == nil { return } for _, f := range list.List { r.declare(f, nil, r.topScope, kind, f.Names...) } } func (r *resolver) walkRecv(recv *ast.FieldList) { // If our receiver has receiver type parameters, we must declare them before // trying to resolve the rest of the receiver, and avoid re-resolving the // type parameter identifiers. if recv == nil || len(recv.List) == 0 { return // nothing to do } typ := recv.List[0].Type if ptr, ok := typ.(*ast.StarExpr); ok { typ = ptr.X } var declareExprs []ast.Expr // exprs to declare var resolveExprs []ast.Expr // exprs to resolve switch typ := typ.(type) { case *ast.IndexExpr: declareExprs = []ast.Expr{typ.Index} resolveExprs = append(resolveExprs, typ.X) case *ast.IndexListExpr: declareExprs = typ.Indices resolveExprs = append(resolveExprs, typ.X) default: resolveExprs = append(resolveExprs, typ) } for _, expr := range declareExprs { if id, _ := expr.(*ast.Ident); id != nil { r.declare(expr, nil, r.topScope, ast.Typ, id) } else { // The receiver type parameter expression is invalid, but try to resolve // it anyway for consistency. resolveExprs = append(resolveExprs, expr) } } for _, expr := range resolveExprs { if expr != nil { ast.Walk(r, expr) } } // The receiver is invalid, but try to resolve it anyway for consistency. for _, f := range recv.List[1:] { if f.Type != nil { ast.Walk(r, f.Type) } } } func (r *resolver) walkFieldList(list *ast.FieldList, kind ast.ObjKind) { if list == nil { return } r.resolveList(list) r.declareList(list, kind) } // walkTParams is like walkFieldList, but declares type parameters eagerly so // that they may be resolved in the constraint expressions held in the field // Type. func (r *resolver) walkTParams(list *ast.FieldList) { r.declareList(list, ast.Typ) r.resolveList(list) } func (r *resolver) walkBody(body *ast.BlockStmt) { if body == nil { return } r.openLabelScope() defer r.closeLabelScope() r.walkStmts(body.List) }
go/src/go/parser/resolver.go/0
{ "file_path": "go/src/go/parser/resolver.go", "repo_id": "go", "token_count": 6539 }
253
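// A client-side sketch of the resolution pass above (the names in the
// source string are invented): with the default parser mode the results
// are recorded in ast.File.Scope and ast.File.Unresolved, and passing
// parser.SkipObjectResolution skips this work entirely.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	const src = `package p

var x = y // y may be declared in another file, so it stays unresolved
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, id := range f.Unresolved {
		fmt.Println("unresolved:", id.Name) // prints "unresolved: y"
	}
	fmt.Println("package-scope objects:", len(f.Scope.Objects)) // x
}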
This file should not be parsed by ParseDir.
go/src/go/parser/testdata/issue42951/not_a_file.go/invalid.go/0
{ "file_path": "go/src/go/parser/testdata/issue42951/not_a_file.go/invalid.go", "repo_id": "go", "token_count": 12 }
254
package sort type orderedSlice[Elem comparable] []Elem func (s orderedSlice[Elem]) Len() int { return len(s) } func (s orderedSlice[Elem]) Less(i, j int) bool { return s[i] < s[j] } func (s orderedSlice[Elem]) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // OrderedSlice sorts the slice s in ascending order. // The elements of s must be ordered using the < operator. func OrderedSlice[Elem comparable](s []Elem) { sort.Sort(orderedSlice[Elem](s)) } type sliceFn[Elem any] struct { s []Elem f func(Elem, Elem) bool } func (s sliceFn[Elem]) Len() int { return len(s.s) } func (s sliceFn[Elem]) Less(i, j int) bool { return s.f(s.s[i], s.s[j]) } func (s sliceFn[Elem]) Swap(i, j int) { s.s[i], s.s[j] = s.s[j], s.s[i] } // SliceFn sorts the slice s according to the function f. func SliceFn[Elem any](s []Elem, f func(Elem, Elem) bool) { Sort(sliceFn[Elem]{s, f}) }
go/src/go/parser/testdata/sort.go2/0
{ "file_path": "go/src/go/parser/testdata/sort.go2", "repo_id": "go", "token_count": 403 }
255
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package token defines constants representing the lexical tokens of the Go // programming language and basic operations on tokens (printing, predicates). package token import ( "strconv" "unicode" "unicode/utf8" ) // Token is the set of lexical tokens of the Go programming language. type Token int // The list of tokens. const ( // Special tokens ILLEGAL Token = iota EOF COMMENT literal_beg // Identifiers and basic type literals // (these tokens stand for classes of literals) IDENT // main INT // 12345 FLOAT // 123.45 IMAG // 123.45i CHAR // 'a' STRING // "abc" literal_end operator_beg // Operators and delimiters ADD // + SUB // - MUL // * QUO // / REM // % AND // & OR // | XOR // ^ SHL // << SHR // >> AND_NOT // &^ ADD_ASSIGN // += SUB_ASSIGN // -= MUL_ASSIGN // *= QUO_ASSIGN // /= REM_ASSIGN // %= AND_ASSIGN // &= OR_ASSIGN // |= XOR_ASSIGN // ^= SHL_ASSIGN // <<= SHR_ASSIGN // >>= AND_NOT_ASSIGN // &^= LAND // && LOR // || ARROW // <- INC // ++ DEC // -- EQL // == LSS // < GTR // > ASSIGN // = NOT // ! NEQ // != LEQ // <= GEQ // >= DEFINE // := ELLIPSIS // ... LPAREN // ( LBRACK // [ LBRACE // { COMMA // , PERIOD // . RPAREN // ) RBRACK // ] RBRACE // } SEMICOLON // ; COLON // : operator_end keyword_beg // Keywords BREAK CASE CHAN CONST CONTINUE DEFAULT DEFER ELSE FALLTHROUGH FOR FUNC GO GOTO IF IMPORT INTERFACE MAP PACKAGE RANGE RETURN SELECT STRUCT SWITCH TYPE VAR keyword_end additional_beg // additional tokens, handled in an ad-hoc manner TILDE additional_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", COMMENT: "COMMENT", IDENT: "IDENT", INT: "INT", FLOAT: "FLOAT", IMAG: "IMAG", CHAR: "CHAR", STRING: "STRING", ADD: "+", SUB: "-", MUL: "*", QUO: "/", REM: "%", AND: "&", OR: "|", XOR: "^", SHL: "<<", SHR: ">>", AND_NOT: "&^", ADD_ASSIGN: "+=", SUB_ASSIGN: "-=", MUL_ASSIGN: "*=", QUO_ASSIGN: "/=", REM_ASSIGN: "%=", AND_ASSIGN: "&=", OR_ASSIGN: "|=", XOR_ASSIGN: "^=", SHL_ASSIGN: "<<=", SHR_ASSIGN: ">>=", AND_NOT_ASSIGN: "&^=", LAND: "&&", LOR: "||", ARROW: "<-", INC: "++", DEC: "--", EQL: "==", LSS: "<", GTR: ">", ASSIGN: "=", NOT: "!", NEQ: "!=", LEQ: "<=", GEQ: ">=", DEFINE: ":=", ELLIPSIS: "...", LPAREN: "(", LBRACK: "[", LBRACE: "{", COMMA: ",", PERIOD: ".", RPAREN: ")", RBRACK: "]", RBRACE: "}", SEMICOLON: ";", COLON: ":", BREAK: "break", CASE: "case", CHAN: "chan", CONST: "const", CONTINUE: "continue", DEFAULT: "default", DEFER: "defer", ELSE: "else", FALLTHROUGH: "fallthrough", FOR: "for", FUNC: "func", GO: "go", GOTO: "goto", IF: "if", IMPORT: "import", INTERFACE: "interface", MAP: "map", PACKAGE: "package", RANGE: "range", RETURN: "return", SELECT: "select", STRUCT: "struct", SWITCH: "switch", TYPE: "type", VAR: "var", TILDE: "~", } // String returns the string corresponding to the token tok. // For operators, delimiters, and keywords the string is the actual // token character sequence (e.g., for the token [ADD], the string is // "+"). For all other tokens the string corresponds to the token // constant name (e.g. for the token [IDENT], the string is "IDENT"). func (tok Token) String() string { s := "" if 0 <= tok && tok < Token(len(tokens)) { s = tokens[tok] } if s == "" { s = "token(" + strconv.Itoa(int(tok)) + ")" } return s } // A set of constants for precedence-based expression parsing. 
// Non-operators have lowest precedence, followed by operators // starting with precedence 1 up to unary operators. The highest // precedence serves as "catch-all" precedence for selector, // indexing, and other operator and delimiter tokens. const ( LowestPrec = 0 // non-operators UnaryPrec = 6 HighestPrec = 7 ) // Precedence returns the operator precedence of the binary // operator op. If op is not a binary operator, the result // is LowestPrecedence. func (op Token) Precedence() int { switch op { case LOR: return 1 case LAND: return 2 case EQL, NEQ, LSS, LEQ, GTR, GEQ: return 3 case ADD, SUB, OR, XOR: return 4 case MUL, QUO, REM, SHL, SHR, AND, AND_NOT: return 5 } return LowestPrec } var keywords map[string]Token func init() { keywords = make(map[string]Token, keyword_end-(keyword_beg+1)) for i := keyword_beg + 1; i < keyword_end; i++ { keywords[tokens[i]] = i } } // Lookup maps an identifier to its keyword token or [IDENT] (if not a keyword). func Lookup(ident string) Token { if tok, is_keyword := keywords[ident]; is_keyword { return tok } return IDENT } // Predicates // IsLiteral returns true for tokens corresponding to identifiers // and basic type literals; it returns false otherwise. func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (tok Token) IsOperator() bool { return (operator_beg < tok && tok < operator_end) || tok == TILDE } // IsKeyword returns true for tokens corresponding to keywords; // it returns false otherwise. func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end } // IsExported reports whether name starts with an upper-case letter. func IsExported(name string) bool { ch, _ := utf8.DecodeRuneInString(name) return unicode.IsUpper(ch) } // IsKeyword reports whether name is a Go keyword, such as "func" or "return". func IsKeyword(name string) bool { // TODO: opt: use a perfect hash function instead of a global map. _, ok := keywords[name] return ok } // IsIdentifier reports whether name is a Go identifier, that is, a non-empty // string made up of letters, digits, and underscores, where the first character // is not a digit. Keywords are not identifiers. func IsIdentifier(name string) bool { if name == "" || IsKeyword(name) { return false } for i, c := range name { if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) { return false } } return true }
go/src/go/token/token.go/0
{ "file_path": "go/src/go/token/token.go", "repo_id": "go", "token_count": 2760 }
256
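// An illustrative sketch of the predicates and precedence helpers defined
// above; the strings being tested are arbitrary examples.
package main

import (
	"fmt"
	"go/token"
)

func main() {
	fmt.Println(token.Lookup("func"))       // the FUNC keyword token; prints "func"
	fmt.Println(token.Lookup("funky"))      // not a keyword; prints "IDENT"
	fmt.Println(token.IsKeyword("range"))   // true
	fmt.Println(token.IsIdentifier("_x9"))  // true
	fmt.Println(token.IsIdentifier("9x"))   // false: leading digit
	fmt.Println(token.IsExported("Printf")) // true
	fmt.Println(token.MUL.Precedence())     // 5 (binds tighter than ADD's 4)
}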
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types_test import ( "fmt" "go/scanner" "go/token" "regexp" "strings" "testing" ) type comment struct { line, col int // comment position text string // comment text, excluding "//", "/*", or "*/" } // commentMap collects all comments in the given src with comment text // that matches the supplied regular expression rx and returns them as // []comment lists in a map indexed by line number. The comment text is // the comment with any comment markers ("//", "/*", or "*/") stripped. // The position for each comment is the position of the token immediately // preceding the comment, with all comments that are on the same line // collected in a slice, in source order. If there is no preceding token // (the matching comment appears at the beginning of the file), then the // recorded position is unknown (line, col = 0, 0). // If there are no matching comments, the result is nil. func commentMap(src []byte, rx *regexp.Regexp) (res map[int][]comment) { fset := token.NewFileSet() file := fset.AddFile("", -1, len(src)) var s scanner.Scanner s.Init(file, src, nil, scanner.ScanComments) var prev token.Pos // position of last non-comment, non-semicolon token for { pos, tok, lit := s.Scan() switch tok { case token.EOF: return case token.COMMENT: if lit[1] == '*' { lit = lit[:len(lit)-2] // strip trailing */ } lit = lit[2:] // strip leading // or /* if rx.MatchString(lit) { p := fset.Position(prev) err := comment{p.Line, p.Column, lit} if res == nil { res = make(map[int][]comment) } res[p.Line] = append(res[p.Line], err) } case token.SEMICOLON: // ignore automatically inserted semicolon if lit == "\n" { continue } fallthrough default: prev = pos } } } func TestCommentMap(t *testing.T) { const src = `/* ERROR "0:0" */ /* ERROR "0:0" */ // ERROR "0:0" // ERROR "0:0" x /* ERROR "3:1" */ // ignore automatically inserted semicolon here /* ERROR "3:1" */ // position of x on previous line x /* ERROR "5:4" */ ; // do not ignore this semicolon /* ERROR "5:24" */ // position of ; on previous line package /* ERROR "7:2" */ // indented with tab import /* ERROR "8:9" */ // indented with blanks ` m := commentMap([]byte(src), regexp.MustCompile("^ ERROR ")) found := 0 // number of errors found for line, errlist := range m { for _, err := range errlist { if err.line != line { t.Errorf("%v: got map line %d; want %d", err, err.line, line) continue } // err.line == line got := strings.TrimSpace(err.text[len(" ERROR "):]) want := fmt.Sprintf(`"%d:%d"`, line, err.col) if got != want { t.Errorf("%v: got msg %q; want %q", err, got, want) continue } found++ } } want := strings.Count(src, " ERROR ") if found != want { t.Errorf("commentMap got %d errors; want %d", found, want) } }
go/src/go/types/commentMap_test.go/0
{ "file_path": "go/src/go/types/commentMap_test.go", "repo_id": "go", "token_count": 1179 }
257
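// A stripped-down sketch of the scanning pattern used by commentMap above:
// scan with scanner.ScanComments, ignore automatically inserted semicolons,
// and remember the position of the last real token seen before each comment.
// The source line is a made-up example.
package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("x := 1 // trailing comment\n")
	fset := token.NewFileSet()
	file := fset.AddFile("example.go", -1, len(src))

	var s scanner.Scanner
	s.Init(file, src, nil, scanner.ScanComments)

	var prev token.Pos // position of the last non-comment, non-semicolon token
	for {
		pos, tok, lit := s.Scan()
		switch {
		case tok == token.EOF:
			return
		case tok == token.COMMENT:
			fmt.Printf("comment %q follows token at %v\n", lit, fset.Position(prev))
		case tok == token.SEMICOLON && lit == "\n":
			// automatically inserted semicolon; skip it, as commentMap does
		default:
			prev = pos
		}
	}
}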
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements (error and trace) message formatting support. package types import ( "bytes" "fmt" "go/ast" "go/token" "strconv" "strings" ) func sprintf(fset *token.FileSet, qf Qualifier, tpSubscripts bool, format string, args ...any) string { for i, arg := range args { switch a := arg.(type) { case nil: arg = "<nil>" case operand: panic("got operand instead of *operand") case *operand: arg = operandString(a, qf) case token.Pos: if fset != nil { arg = fset.Position(a).String() } case ast.Expr: arg = ExprString(a) case []ast.Expr: var buf bytes.Buffer buf.WriteByte('[') writeExprList(&buf, a) buf.WriteByte(']') arg = buf.String() case Object: arg = ObjectString(a, qf) case Type: var buf bytes.Buffer w := newTypeWriter(&buf, qf) w.tpSubscripts = tpSubscripts w.typ(a) arg = buf.String() case []Type: var buf bytes.Buffer w := newTypeWriter(&buf, qf) w.tpSubscripts = tpSubscripts buf.WriteByte('[') for i, x := range a { if i > 0 { buf.WriteString(", ") } w.typ(x) } buf.WriteByte(']') arg = buf.String() case []*TypeParam: var buf bytes.Buffer w := newTypeWriter(&buf, qf) w.tpSubscripts = tpSubscripts buf.WriteByte('[') for i, x := range a { if i > 0 { buf.WriteString(", ") } w.typ(x) } buf.WriteByte(']') arg = buf.String() } args[i] = arg } return fmt.Sprintf(format, args...) } // check may be nil. func (check *Checker) sprintf(format string, args ...any) string { var fset *token.FileSet var qf Qualifier if check != nil { fset = check.fset qf = check.qualifier } return sprintf(fset, qf, false, format, args...) } func (check *Checker) trace(pos token.Pos, format string, args ...any) { fmt.Printf("%s:\t%s%s\n", check.fset.Position(pos), strings.Repeat(". ", check.indent), sprintf(check.fset, check.qualifier, true, format, args...), ) } // dump is only needed for debugging func (check *Checker) dump(format string, args ...any) { fmt.Println(sprintf(check.fset, check.qualifier, true, format, args...)) } func (check *Checker) qualifier(pkg *Package) string { // Qualify the package unless it's the package being type-checked. if pkg != check.pkg { if check.pkgPathMap == nil { check.pkgPathMap = make(map[string]map[string]bool) check.seenPkgMap = make(map[*Package]bool) check.markImports(check.pkg) } // If the same package name was used by multiple packages, display the full path. if len(check.pkgPathMap[pkg.name]) > 1 { return strconv.Quote(pkg.path) } return pkg.name } return "" } // markImports recursively walks pkg and its imports, to record unique import // paths in pkgPathMap. func (check *Checker) markImports(pkg *Package) { if check.seenPkgMap[pkg] { return } check.seenPkgMap[pkg] = true forName, ok := check.pkgPathMap[pkg.name] if !ok { forName = make(map[string]bool) check.pkgPathMap[pkg.name] = forName } forName[pkg.path] = true for _, imp := range pkg.imports { check.markImports(imp) } } // stripAnnotations removes internal (type) annotations from s. func stripAnnotations(s string) string { var buf strings.Builder for _, r := range s { // strip #'s and subscript digits if r < '₀' || '₀'+10 <= r { // '₀' == U+2080 buf.WriteRune(r) } } if buf.Len() < len(s) { return buf.String() } return s }
go/src/go/types/format.go/0
{ "file_path": "go/src/go/types/format.go", "repo_id": "go", "token_count": 1478 }
258
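// A sketch, using only exported go/types API, of the package-qualification
// idea behind check.qualifier above: a types.Qualifier controls how package
// names appear in printed types. The package path and type name are invented.
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	fmt.Println(types.TypeString(named, nil))                   // example.com/p.T
	fmt.Println(types.TypeString(named, types.RelativeTo(pkg))) // T
}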
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types_test import ( "go/importer" "go/token" "path/filepath" "runtime" "testing" . "go/types" ) // BenchmarkLookupFieldOrMethod measures types.LookupFieldOrMethod performance. // LookupFieldOrMethod is a performance hotspot for both type-checking and // external API calls. func BenchmarkLookupFieldOrMethod(b *testing.B) { // Choose an arbitrary, large package. path := filepath.Join(runtime.GOROOT(), "src", "net", "http") fset := token.NewFileSet() files, err := pkgFiles(fset, path) if err != nil { b.Fatal(err) } conf := Config{ Importer: importer.Default(), } pkg, err := conf.Check("http", fset, files, nil) if err != nil { b.Fatal(err) } scope := pkg.Scope() names := scope.Names() // Look up an arbitrary name for each type referenced in the package scope. lookup := func() { for _, name := range names { typ := scope.Lookup(name).Type() LookupFieldOrMethod(typ, true, pkg, "m") } } // Perform a lookup once, to ensure that any lazily-evaluated state is // complete. lookup() b.ResetTimer() for i := 0; i < b.N; i++ { lookup() } }
go/src/go/types/lookup_test.go/0
{ "file_path": "go/src/go/types/lookup_test.go", "repo_id": "go", "token_count": 453 }
259
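// A small usage sketch for LookupFieldOrMethod itself (the function being
// benchmarked above), applied to a hand-constructed struct type instead of
// a type-checked package. All names here are invented.
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/q", "q")
	field := types.NewField(token.NoPos, pkg, "N", types.Typ[types.Int], false)
	st := types.NewStruct([]*types.Var{field}, nil)

	obj, index, indirect := types.LookupFieldOrMethod(st, true, pkg, "N")
	// obj is the found field object (or nil), index is its field index path,
	// and indirect reports whether a pointer indirection was traversed.
	fmt.Println(obj, index, indirect)
}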
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types import ( "fmt" "go/ast" "go/constant" "go/internal/typeparams" "go/token" . "internal/types/errors" "sort" "strconv" "strings" "unicode" ) // A declInfo describes a package-level const, type, var, or func declaration. type declInfo struct { file *Scope // scope of file containing this declaration lhs []*Var // lhs of n:1 variable declarations, or nil vtyp ast.Expr // type, or nil (for const and var declarations only) init ast.Expr // init/orig expression, or nil (for const and var declarations only) inherited bool // if set, the init expression is inherited from a previous constant declaration tdecl *ast.TypeSpec // type declaration, or nil fdecl *ast.FuncDecl // func declaration, or nil // The deps field tracks initialization expression dependencies. deps map[Object]bool // lazily initialized } // hasInitializer reports whether the declared object has an initialization // expression or function body. func (d *declInfo) hasInitializer() bool { return d.init != nil || d.fdecl != nil && d.fdecl.Body != nil } // addDep adds obj to the set of objects d's init expression depends on. func (d *declInfo) addDep(obj Object) { m := d.deps if m == nil { m = make(map[Object]bool) d.deps = m } m[obj] = true } // arityMatch checks that the lhs and rhs of a const or var decl // have the appropriate number of names and init exprs. For const // decls, init is the value spec providing the init exprs; for // var decls, init is nil (the init exprs are in s in this case). func (check *Checker) arityMatch(s, init *ast.ValueSpec) { l := len(s.Names) r := len(s.Values) if init != nil { r = len(init.Values) } const code = WrongAssignCount switch { case init == nil && r == 0: // var decl w/o init expr if s.Type == nil { check.error(s, code, "missing type or init expr") } case l < r: if l < len(s.Values) { // init exprs from s n := s.Values[l] check.errorf(n, code, "extra init expr %s", n) // TODO(gri) avoid declared and not used error here } else { // init exprs "inherited" check.errorf(s, code, "extra init expr at %s", check.fset.Position(init.Pos())) // TODO(gri) avoid declared and not used error here } case l > r && (init != nil || r != 1): n := s.Names[r] check.errorf(n, code, "missing init expr for %s", n) } } func validatedImportPath(path string) (string, error) { s, err := strconv.Unquote(path) if err != nil { return "", err } if s == "" { return "", fmt.Errorf("empty string") } const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" for _, r := range s { if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { return s, fmt.Errorf("invalid character %#U", r) } } return s, nil } // declarePkgObj declares obj in the package scope, records its ident -> obj mapping, // and updates check.objMap. The object must not be a function or method. func (check *Checker) declarePkgObj(ident *ast.Ident, obj Object, d *declInfo) { assert(ident.Name == obj.Name()) // spec: "A package-scope or file-scope identifier with name init // may only be declared to be a function with this (func()) signature." if ident.Name == "init" { check.error(ident, InvalidInitDecl, "cannot declare init - must be func") return } // spec: "The main package must have package name main and declare // a function main that takes no arguments and returns no value." 
if ident.Name == "main" && check.pkg.name == "main" { check.error(ident, InvalidMainDecl, "cannot declare main - must be func") return } check.declare(check.pkg.scope, ident, obj, nopos) check.objMap[obj] = d obj.setOrder(uint32(len(check.objMap))) } // filename returns a filename suitable for debugging output. func (check *Checker) filename(fileNo int) string { file := check.files[fileNo] if pos := file.Pos(); pos.IsValid() { return check.fset.File(pos).Name() } return fmt.Sprintf("file[%d]", fileNo) } func (check *Checker) importPackage(at positioner, path, dir string) *Package { // If we already have a package for the given (path, dir) // pair, use it instead of doing a full import. // Checker.impMap only caches packages that are marked Complete // or fake (dummy packages for failed imports). Incomplete but // non-fake packages do require an import to complete them. key := importKey{path, dir} imp := check.impMap[key] if imp != nil { return imp } // no package yet => import it if path == "C" && (check.conf.FakeImportC || check.conf.go115UsesCgo) { if check.conf.FakeImportC && check.conf.go115UsesCgo { check.error(at, BadImportPath, "cannot use FakeImportC and go115UsesCgo together") } imp = NewPackage("C", "C") imp.fake = true // package scope is not populated imp.cgo = check.conf.go115UsesCgo } else { // ordinary import var err error if importer := check.conf.Importer; importer == nil { err = fmt.Errorf("Config.Importer not installed") } else if importerFrom, ok := importer.(ImporterFrom); ok { imp, err = importerFrom.ImportFrom(path, dir, 0) if imp == nil && err == nil { err = fmt.Errorf("Config.Importer.ImportFrom(%s, %s, 0) returned nil but no error", path, dir) } } else { imp, err = importer.Import(path) if imp == nil && err == nil { err = fmt.Errorf("Config.Importer.Import(%s) returned nil but no error", path) } } // make sure we have a valid package name // (errors here can only happen through manipulation of packages after creation) if err == nil && imp != nil && (imp.name == "_" || imp.name == "") { err = fmt.Errorf("invalid package name: %q", imp.name) imp = nil // create fake package below } if err != nil { check.errorf(at, BrokenImport, "could not import %s (%s)", path, err) if imp == nil { // create a new fake package // come up with a sensible package name (heuristic) name := path if i := len(name); i > 0 && name[i-1] == '/' { name = name[:i-1] } if i := strings.LastIndex(name, "/"); i >= 0 { name = name[i+1:] } imp = NewPackage(path, name) } // continue to use the package as best as we can imp.fake = true // avoid follow-up lookup failures } } // package should be complete or marked fake, but be cautious if imp.complete || imp.fake { check.impMap[key] = imp // Once we've formatted an error message, keep the pkgPathMap // up-to-date on subsequent imports. It is used for package // qualification in error messages. if check.pkgPathMap != nil { check.markImports(imp) } return imp } // something went wrong (importer may have returned incomplete package without error) return nil } // collectObjects collects all file and package objects and inserts them // into their respective scopes. It also performs imports and associates // methods with receiver base type names. func (check *Checker) collectObjects() { pkg := check.pkg // pkgImports is the set of packages already imported by any package file seen // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate // it (pkg.imports may not be empty if we are checking test files incrementally). 
// Note that pkgImports is keyed by package (and thus package path), not by an // importKey value. Two different importKey values may map to the same package // which is why we cannot use the check.impMap here. var pkgImports = make(map[*Package]bool) for _, imp := range pkg.imports { pkgImports[imp] = true } type methodInfo struct { obj *Func // method ptr bool // true if pointer receiver recv *ast.Ident // receiver type name } var methods []methodInfo // collected methods with valid receivers and non-blank _ names var fileScopes []*Scope for fileNo, file := range check.files { // The package identifier denotes the current package, // but there is no corresponding package object. check.recordDef(file.Name, nil) // Use the actual source file extent rather than *ast.File extent since the // latter doesn't include comments which appear at the start or end of the file. // Be conservative and use the *ast.File extent if we don't have a *token.File. pos, end := file.Pos(), file.End() if f := check.fset.File(file.Pos()); f != nil { pos, end = token.Pos(f.Base()), token.Pos(f.Base()+f.Size()) } fileScope := NewScope(pkg.scope, pos, end, check.filename(fileNo)) fileScopes = append(fileScopes, fileScope) check.recordScope(file, fileScope) // determine file directory, necessary to resolve imports // FileName may be "" (typically for tests) in which case // we get "." as the directory which is what we would want. fileDir := dir(check.fset.Position(file.Name.Pos()).Filename) check.walkDecls(file.Decls, func(d decl) { switch d := d.(type) { case importDecl: // import package if d.spec.Path.Value == "" { return // error reported by parser } path, err := validatedImportPath(d.spec.Path.Value) if err != nil { check.errorf(d.spec.Path, BadImportPath, "invalid import path (%s)", err) return } imp := check.importPackage(d.spec.Path, path, fileDir) if imp == nil { return } // local name overrides imported package name name := imp.name if d.spec.Name != nil { name = d.spec.Name.Name if path == "C" { // match 1.17 cmd/compile (not prescribed by spec) check.error(d.spec.Name, ImportCRenamed, `cannot rename import "C"`) return } } if name == "init" { check.error(d.spec, InvalidInitDecl, "cannot import package as init - init must be a func") return } // add package to list of explicit imports // (this functionality is provided as a convenience // for clients; it is not needed for type-checking) if !pkgImports[imp] { pkgImports[imp] = true pkg.imports = append(pkg.imports, imp) } pkgName := NewPkgName(d.spec.Pos(), pkg, name, imp) if d.spec.Name != nil { // in a dot-import, the dot represents the package check.recordDef(d.spec.Name, pkgName) } else { check.recordImplicit(d.spec, pkgName) } if imp.fake { // match 1.17 cmd/compile (not prescribed by spec) pkgName.used = true } // add import to file scope check.imports = append(check.imports, pkgName) if name == "." { // dot-import if check.dotImportMap == nil { check.dotImportMap = make(map[dotImportKey]*PkgName) } // merge imported scope with file scope for name, obj := range imp.scope.elems { // Note: Avoid eager resolve(name, obj) here, so we only // resolve dot-imported objects as needed. // A package scope may contain non-exported objects, // do not import them! if token.IsExported(name) { // declare dot-imported object // (Do not use check.declare because it modifies the object // via Object.setScopePos, which leads to a race condition; // the object may be imported into more than one file scope // concurrently. See go.dev/issue/32154.) 
if alt := fileScope.Lookup(name); alt != nil { err := check.newError(DuplicateDecl) err.addf(d.spec.Name, "%s redeclared in this block", alt.Name()) err.addAltDecl(alt) err.report() } else { fileScope.insert(name, obj) check.dotImportMap[dotImportKey{fileScope, name}] = pkgName } } } } else { // declare imported package object in file scope // (no need to provide s.Name since we called check.recordDef earlier) check.declare(fileScope, nil, pkgName, nopos) } case constDecl: // declare all constants for i, name := range d.spec.Names { obj := NewConst(name.Pos(), pkg, name.Name, nil, constant.MakeInt64(int64(d.iota))) var init ast.Expr if i < len(d.init) { init = d.init[i] } d := &declInfo{file: fileScope, vtyp: d.typ, init: init, inherited: d.inherited} check.declarePkgObj(name, obj, d) } case varDecl: lhs := make([]*Var, len(d.spec.Names)) // If there's exactly one rhs initializer, use // the same declInfo d1 for all lhs variables // so that each lhs variable depends on the same // rhs initializer (n:1 var declaration). var d1 *declInfo if len(d.spec.Values) == 1 { // The lhs elements are only set up after the for loop below, // but that's ok because declareVar only collects the declInfo // for a later phase. d1 = &declInfo{file: fileScope, lhs: lhs, vtyp: d.spec.Type, init: d.spec.Values[0]} } // declare all variables for i, name := range d.spec.Names { obj := NewVar(name.Pos(), pkg, name.Name, nil) lhs[i] = obj di := d1 if di == nil { // individual assignments var init ast.Expr if i < len(d.spec.Values) { init = d.spec.Values[i] } di = &declInfo{file: fileScope, vtyp: d.spec.Type, init: init} } check.declarePkgObj(name, obj, di) } case typeDecl: obj := NewTypeName(d.spec.Name.Pos(), pkg, d.spec.Name.Name, nil) check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, tdecl: d.spec}) case funcDecl: name := d.decl.Name.Name obj := NewFunc(d.decl.Name.Pos(), pkg, name, nil) // signature set later hasTParamError := false // avoid duplicate type parameter errors if d.decl.Recv.NumFields() == 0 { // regular function if d.decl.Recv != nil { check.error(d.decl.Recv, BadRecv, "method has no receiver") // treat as function } if name == "init" || (name == "main" && check.pkg.name == "main") { code := InvalidInitDecl if name == "main" { code = InvalidMainDecl } if d.decl.Type.TypeParams.NumFields() != 0 { check.softErrorf(d.decl.Type.TypeParams.List[0], code, "func %s must have no type parameters", name) hasTParamError = true } if t := d.decl.Type; t.Params.NumFields() != 0 || t.Results != nil { // TODO(rFindley) Should this be a hard error? check.softErrorf(d.decl.Name, code, "func %s must have no arguments and no return values", name) } } if name == "init" { // don't declare init functions in the package scope - they are invisible obj.parent = pkg.scope check.recordDef(d.decl.Name, obj) // init functions must have a body if d.decl.Body == nil { // TODO(gri) make this error message consistent with the others above check.softErrorf(obj, MissingInitBody, "missing function body") } } else { check.declare(pkg.scope, d.decl.Name, obj, nopos) } } else { // method // TODO(rFindley) earlier versions of this code checked that methods // have no type parameters, but this is checked later // when type checking the function type. Confirm that // we don't need to check tparams here. ptr, base, _ := check.unpackRecv(d.decl.Recv.List[0].Type, false) // (Methods with invalid receiver cannot be associated to a type, and // methods with blank _ names are never found; no need to collect any // of them. 
They will still be type-checked with all the other functions.) if recv, _ := base.(*ast.Ident); recv != nil && name != "_" { methods = append(methods, methodInfo{obj, ptr, recv}) } check.recordDef(d.decl.Name, obj) } _ = d.decl.Type.TypeParams.NumFields() != 0 && !hasTParamError && check.verifyVersionf(d.decl.Type.TypeParams.List[0], go1_18, "type parameter") info := &declInfo{file: fileScope, fdecl: d.decl} // Methods are not package-level objects but we still track them in the // object map so that we can handle them like regular functions (if the // receiver is invalid); also we need their fdecl info when associating // them with their receiver base type, below. check.objMap[obj] = info obj.setOrder(uint32(len(check.objMap))) } }) } // verify that objects in package and file scopes have different names for _, scope := range fileScopes { for name, obj := range scope.elems { if alt := pkg.scope.Lookup(name); alt != nil { obj = resolve(name, obj) err := check.newError(DuplicateDecl) if pkg, ok := obj.(*PkgName); ok { err.addf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) err.addAltDecl(pkg) } else { err.addf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) // TODO(gri) dot-imported objects don't have a position; addAltDecl won't print anything err.addAltDecl(obj) } err.report() } } } // Now that we have all package scope objects and all methods, // associate methods with receiver base type name where possible. // Ignore methods that have an invalid receiver. They will be // type-checked later, with regular functions. if methods == nil { return // nothing to do } check.methods = make(map[*TypeName][]*Func) for i := range methods { m := &methods[i] // Determine the receiver base type and associate m with it. ptr, base := check.resolveBaseTypeName(m.ptr, m.recv, fileScopes) if base != nil { m.obj.hasPtrRecv_ = ptr check.methods[base] = append(check.methods[base], m.obj) } } } // unpackRecv unpacks a receiver type expression and returns its components: ptr indicates // whether rtyp is a pointer receiver, base is the receiver base type expression stripped // of its type parameters (if any), and tparams are its type parameter names, if any. The // type parameters are only unpacked if unpackParams is set. For instance, given the rtyp // // *T[A, _] // // ptr is true, base is T, and tparams is [A, _] (assuming unpackParams is set). // Note that base may not be a *ast.Ident for erroneous programs. func (check *Checker) unpackRecv(rtyp ast.Expr, unpackParams bool) (ptr bool, base ast.Expr, tparams []*ast.Ident) { // unpack receiver type base = ast.Unparen(rtyp) if t, _ := base.(*ast.StarExpr); t != nil { ptr = true base = ast.Unparen(t.X) } // unpack type parameters, if any switch base.(type) { case *ast.IndexExpr, *ast.IndexListExpr: ix := typeparams.UnpackIndexExpr(base) base = ix.X if unpackParams { for _, arg := range ix.Indices { var par *ast.Ident switch arg := arg.(type) { case *ast.Ident: par = arg case *ast.BadExpr: // ignore - error already reported by parser case nil: check.error(ix.Orig, InvalidSyntaxTree, "parameterized receiver contains nil parameters") default: check.errorf(arg, BadDecl, "receiver type parameter %s must be an identifier", arg) } if par == nil { par = &ast.Ident{NamePos: arg.Pos(), Name: "_"} } tparams = append(tparams, par) } } } return } // resolveBaseTypeName returns the non-alias base type name for typ, and whether // there was a pointer indirection to get to it. 
The base type name must be declared // in package scope, and there can be at most one pointer indirection. If no such type // name exists, the returned base is nil. func (check *Checker) resolveBaseTypeName(seenPtr bool, typ ast.Expr, fileScopes []*Scope) (ptr bool, base *TypeName) { // Algorithm: Starting from a type expression, which may be a name, // we follow that type through alias declarations until we reach a // non-alias type name. If we encounter anything but pointer types or // parentheses we're done. If we encounter more than one pointer type // we're done. ptr = seenPtr var seen map[*TypeName]bool for { // Note: this differs from types2, but is necessary. The syntax parser // strips unnecessary parens. typ = ast.Unparen(typ) // check if we have a pointer type if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil { // if we've already seen a pointer, we're done if ptr { return false, nil } ptr = true typ = ast.Unparen(pexpr.X) // continue with pointer base type } // typ must be a name, or a C.name cgo selector. var name string switch typ := typ.(type) { case *ast.Ident: name = typ.Name case *ast.SelectorExpr: // C.struct_foo is a valid type name for packages using cgo. // // Detect this case, and adjust name so that the correct TypeName is // resolved below. if ident, _ := typ.X.(*ast.Ident); ident != nil && ident.Name == "C" { // Check whether "C" actually resolves to an import of "C", by looking // in the appropriate file scope. var obj Object for _, scope := range fileScopes { if scope.Contains(ident.Pos()) { obj = scope.Lookup(ident.Name) } } // If Config.go115UsesCgo is set, the typechecker will resolve Cgo // selectors to their cgo name. We must do the same here. if pname, _ := obj.(*PkgName); pname != nil { if pname.imported.cgo { // only set if Config.go115UsesCgo is set name = "_Ctype_" + typ.Sel.Name } } } if name == "" { return false, nil } default: return false, nil } // name must denote an object found in the current package scope // (note that dot-imported objects are not in the package scope!) obj := check.pkg.scope.Lookup(name) if obj == nil { return false, nil } // the object must be a type name... tname, _ := obj.(*TypeName) if tname == nil { return false, nil } // ... which we have not seen before if seen[tname] { return false, nil } // we're done if tdecl defined tname as a new type // (rather than an alias) tdecl := check.objMap[tname].tdecl // must exist for objects in package scope if !tdecl.Assign.IsValid() { return ptr, tname } // otherwise, continue resolving typ = tdecl.Type if seen == nil { seen = make(map[*TypeName]bool) } seen[tname] = true } } // packageObjects typechecks all package objects, but not function bodies. func (check *Checker) packageObjects() { // process package objects in source order for reproducible results objList := make([]Object, len(check.objMap)) i := 0 for obj := range check.objMap { objList[i] = obj i++ } sort.Sort(inSourceOrder(objList)) // add new methods to already type-checked types (from a prior Checker.Files call) for _, obj := range objList { if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil { check.collectMethods(obj) } } if false && check.conf._EnableAlias { // With Alias nodes we can process declarations in any order. // // TODO(adonovan): unfortunately, Alias nodes // (GODEBUG=gotypesalias=1) don't entirely resolve // problems with cycles. 
For example, in // GOROOT/test/typeparam/issue50259.go, // // type T[_ any] struct{} // type A T[B] // type B = T[A] // // TypeName A has Type Named during checking, but by // the time the unified export data is written out, // its Type is Invalid. // // Investigate and reenable this branch. for _, obj := range objList { check.objDecl(obj, nil) } } else { // Without Alias nodes, we process non-alias type declarations first, followed by // alias declarations, and then everything else. This appears to avoid most situations // where the type of an alias is needed before it is available. // There may still be cases where this is not good enough (see also go.dev/issue/25838). // In those cases Checker.ident will report an error ("invalid use of type alias"). var aliasList []*TypeName var othersList []Object // everything that's not a type // phase 1: non-alias type declarations for _, obj := range objList { if tname, _ := obj.(*TypeName); tname != nil { if check.objMap[tname].tdecl.Assign.IsValid() { aliasList = append(aliasList, tname) } else { check.objDecl(obj, nil) } } else { othersList = append(othersList, obj) } } // phase 2: alias type declarations for _, obj := range aliasList { check.objDecl(obj, nil) } // phase 3: all other declarations for _, obj := range othersList { check.objDecl(obj, nil) } } // At this point we may have a non-empty check.methods map; this means that not all // entries were deleted at the end of typeDecl because the respective receiver base // types were not found. In that case, an error was reported when declaring those // methods. We can now safely discard this map. check.methods = nil } // inSourceOrder implements the sort.Sort interface. type inSourceOrder []Object func (a inSourceOrder) Len() int { return len(a) } func (a inSourceOrder) Less(i, j int) bool { return a[i].order() < a[j].order() } func (a inSourceOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // unusedImports checks for unused imports. func (check *Checker) unusedImports() { // If function bodies are not checked, packages' uses are likely missing - don't check. if check.conf.IgnoreFuncBodies { return } // spec: "It is illegal (...) to directly import a package without referring to // any of its exported identifiers. To import a package solely for its side-effects // (initialization), use the blank identifier as explicit package name." for _, obj := range check.imports { if !obj.used && obj.name != "_" { check.errorUnusedPkg(obj) } } } func (check *Checker) errorUnusedPkg(obj *PkgName) { // If the package was imported with a name other than the final // import path element, show it explicitly in the error message. // Note that this handles both renamed imports and imports of // packages containing unconventional package declarations. // Note that this uses / always, even on Windows, because Go import // paths always use forward slashes. path := obj.imported.path elem := path if i := strings.LastIndex(elem, "/"); i >= 0 { elem = elem[i+1:] } if obj.name == "" || obj.name == "." || obj.name == elem { check.softErrorf(obj, UnusedImport, "%q imported and not used", path) } else { check.softErrorf(obj, UnusedImport, "%q imported as %s and not used", path, obj.name) } } // dir makes a good-faith attempt to return the directory // portion of path. If path is empty, the result is ".". // (Per the go/build package dependency tests, we cannot import // path/filepath and simply use filepath.Dir.) 
func dir(path string) string { if i := strings.LastIndexAny(path, `/\`); i > 0 { return path[:i] } // i <= 0 return "." }
go/src/go/types/resolver.go/0
{ "file_path": "go/src/go/types/resolver.go", "repo_id": "go", "token_count": 9830 }
260
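The failed-import path in importPackage above guesses a package name from the import path (drop one trailing '/', keep the last path element). A minimal standalone sketch of just that heuristic; fallbackPkgName is an illustrative name, not part of go/types:

package main

import (
	"fmt"
	"strings"
)

// fallbackPkgName mirrors the heuristic used when an import fails:
// strip one trailing '/', then keep the element after the last '/'.
func fallbackPkgName(path string) string {
	name := path
	if i := len(name); i > 0 && name[i-1] == '/' {
		name = name[:i-1]
	}
	if i := strings.LastIndex(name, "/"); i >= 0 {
		name = name[i+1:]
	}
	return name
}

func main() {
	fmt.Println(fallbackPkgName("example.com/foo/bar/")) // bar
}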
// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT. // Source: ../../cmd/compile/internal/types2/termlist_test.go // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types import ( "strings" "testing" ) // maketl makes a term list from a string of the term list. func maketl(s string) termlist { s = strings.ReplaceAll(s, " ", "") names := strings.Split(s, "|") r := make(termlist, len(names)) for i, n := range names { r[i] = testTerm(n) } return r } func TestTermlistAll(t *testing.T) { if !allTermlist.isAll() { t.Errorf("allTermlist is not the set of all types") } } func TestTermlistString(t *testing.T) { for _, want := range []string{ "∅", "𝓤", "int", "~int", "myInt", "∅ | ∅", "𝓤 | 𝓤", "∅ | 𝓤 | int", "∅ | 𝓤 | int | myInt", } { if got := maketl(want).String(); got != want { t.Errorf("(%v).String() == %v", want, got) } } } func TestTermlistIsEmpty(t *testing.T) { for test, want := range map[string]bool{ "∅": true, "∅ | ∅": true, "∅ | ∅ | 𝓤": false, "∅ | ∅ | myInt": false, "𝓤": false, "𝓤 | int": false, "𝓤 | myInt | ∅": false, } { xl := maketl(test) got := xl.isEmpty() if got != want { t.Errorf("(%v).isEmpty() == %v; want %v", test, got, want) } } } func TestTermlistIsAll(t *testing.T) { for test, want := range map[string]bool{ "∅": false, "∅ | ∅": false, "int | ~string": false, "~int | myInt": false, "∅ | ∅ | 𝓤": true, "𝓤": true, "𝓤 | int": true, "myInt | 𝓤": true, } { xl := maketl(test) got := xl.isAll() if got != want { t.Errorf("(%v).isAll() == %v; want %v", test, got, want) } } } func TestTermlistNorm(t *testing.T) { for _, test := range []struct { xl, want string }{ {"∅", "∅"}, {"∅ | ∅", "∅"}, {"∅ | int", "int"}, {"∅ | myInt", "myInt"}, {"𝓤 | int", "𝓤"}, {"𝓤 | myInt", "𝓤"}, {"int | myInt", "int | myInt"}, {"~int | int", "~int"}, {"~int | myInt", "~int"}, {"int | ~string | int", "int | ~string"}, {"~int | string | 𝓤 | ~string | int", "𝓤"}, {"~int | string | myInt | ~string | int", "~int | ~string"}, } { xl := maketl(test.xl) got := maketl(test.xl).norm() if got.String() != test.want { t.Errorf("(%v).norm() = %v; want %v", xl, got, test.want) } } } func TestTermlistUnion(t *testing.T) { for _, test := range []struct { xl, yl, want string }{ {"∅", "∅", "∅"}, {"∅", "𝓤", "𝓤"}, {"∅", "int", "int"}, {"𝓤", "~int", "𝓤"}, {"int", "~int", "~int"}, {"int", "string", "int | string"}, {"int", "myInt", "int | myInt"}, {"~int", "myInt", "~int"}, {"int | string", "~string", "int | ~string"}, {"~int | string", "~string | int", "~int | ~string"}, {"~int | string | ∅", "~string | int", "~int | ~string"}, {"~int | myInt | ∅", "~string | int", "~int | ~string"}, {"~int | string | 𝓤", "~string | int", "𝓤"}, {"~int | string | myInt", "~string | int", "~int | ~string"}, } { xl := maketl(test.xl) yl := maketl(test.yl) got := xl.union(yl).String() if got != test.want { t.Errorf("(%v).union(%v) = %v; want %v", test.xl, test.yl, got, test.want) } } } func TestTermlistIntersect(t *testing.T) { for _, test := range []struct { xl, yl, want string }{ {"∅", "∅", "∅"}, {"∅", "𝓤", "∅"}, {"∅", "int", "∅"}, {"∅", "myInt", "∅"}, {"𝓤", "~int", "~int"}, {"𝓤", "myInt", "myInt"}, {"int", "~int", "int"}, {"int", "string", "∅"}, {"int", "myInt", "∅"}, {"~int", "myInt", "myInt"}, {"int | string", "~string", "string"}, {"~int | string", "~string | int", "int | string"}, {"~int | string | ∅", "~string | int", "int | string"}, {"~int | myInt | ∅", "~string | int", "int"}, {"~int | string | 𝓤", "~string | 
int", "int | ~string"}, {"~int | string | myInt", "~string | int", "int | string"}, } { xl := maketl(test.xl) yl := maketl(test.yl) got := xl.intersect(yl).String() if got != test.want { t.Errorf("(%v).intersect(%v) = %v; want %v", test.xl, test.yl, got, test.want) } } } func TestTermlistEqual(t *testing.T) { for _, test := range []struct { xl, yl string want bool }{ {"∅", "∅", true}, {"∅", "𝓤", false}, {"𝓤", "𝓤", true}, {"𝓤 | int", "𝓤", true}, {"𝓤 | int", "string | 𝓤", true}, {"𝓤 | myInt", "string | 𝓤", true}, {"int | ~string", "string | int", false}, {"~int | string", "string | myInt", false}, {"int | ~string | ∅", "string | int | ~string", true}, } { xl := maketl(test.xl) yl := maketl(test.yl) got := xl.equal(yl) if got != test.want { t.Errorf("(%v).equal(%v) = %v; want %v", test.xl, test.yl, got, test.want) } } } func TestTermlistIncludes(t *testing.T) { for _, test := range []struct { xl, typ string want bool }{ {"∅", "int", false}, {"𝓤", "int", true}, {"~int", "int", true}, {"int", "string", false}, {"~int", "string", false}, {"~int", "myInt", true}, {"int | string", "string", true}, {"~int | string", "int", true}, {"~int | string", "myInt", true}, {"~int | myInt | ∅", "myInt", true}, {"myInt | ∅ | 𝓤", "int", true}, } { xl := maketl(test.xl) yl := testTerm(test.typ).typ got := xl.includes(yl) if got != test.want { t.Errorf("(%v).includes(%v) = %v; want %v", test.xl, yl, got, test.want) } } } func TestTermlistSupersetOf(t *testing.T) { for _, test := range []struct { xl, typ string want bool }{ {"∅", "∅", true}, {"∅", "𝓤", false}, {"∅", "int", false}, {"𝓤", "∅", true}, {"𝓤", "𝓤", true}, {"𝓤", "int", true}, {"𝓤", "~int", true}, {"𝓤", "myInt", true}, {"~int", "int", true}, {"~int", "~int", true}, {"~int", "myInt", true}, {"int", "~int", false}, {"myInt", "~int", false}, {"int", "string", false}, {"~int", "string", false}, {"int | string", "string", true}, {"int | string", "~string", false}, {"~int | string", "int", true}, {"~int | string", "myInt", true}, {"~int | string | ∅", "string", true}, {"~string | ∅ | 𝓤", "myInt", true}, } { xl := maketl(test.xl) y := testTerm(test.typ) got := xl.supersetOf(y) if got != test.want { t.Errorf("(%v).supersetOf(%v) = %v; want %v", test.xl, y, got, test.want) } } } func TestTermlistSubsetOf(t *testing.T) { for _, test := range []struct { xl, yl string want bool }{ {"∅", "∅", true}, {"∅", "𝓤", true}, {"𝓤", "∅", false}, {"𝓤", "𝓤", true}, {"int", "int | string", true}, {"~int", "int | string", false}, {"~int", "myInt | string", false}, {"myInt", "~int | string", true}, {"~int", "string | string | int | ~int", true}, {"myInt", "string | string | ~int", true}, {"int | string", "string", false}, {"int | string", "string | int", true}, {"int | ~string", "string | int", false}, {"myInt | ~string", "string | int | 𝓤", true}, {"int | ~string", "string | int | ∅ | string", false}, {"int | myInt", "string | ~int | ∅ | string", true}, } { xl := maketl(test.xl) yl := maketl(test.yl) got := xl.subsetOf(yl) if got != test.want { t.Errorf("(%v).subsetOf(%v) = %v; want %v", test.xl, test.yl, got, test.want) } } }
go/src/go/types/termlist_test.go/0
{ "file_path": "go/src/go/types/termlist_test.go", "repo_id": "go", "token_count": 3559 }
261
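maketl above builds a termlist from notation such as "~int | string | 𝓤" by stripping spaces and splitting on '|'. A sketch of only that parsing step, kept apart from the unexported termlist and testTerm types; splitTerms is an illustrative name:

package main

import (
	"fmt"
	"strings"
)

// splitTerms does the same string preparation as maketl:
// remove all spaces, then split the notation on '|'.
func splitTerms(s string) []string {
	s = strings.ReplaceAll(s, " ", "")
	return strings.Split(s, "|")
}

func main() {
	fmt.Println(splitTerms("~int | string | 𝓤")) // [~int string 𝓤]
}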
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" // castagnoliUpdate updates the non-inverted crc with the given data. // func castagnoliUpdate(crc uint32, p []byte) uint32 TEXT ·castagnoliUpdate(SB),NOSPLIT,$0-36 MOVWU crc+0(FP), R9 // CRC value MOVD p+8(FP), R13 // data pointer MOVD p_len+16(FP), R11 // len(p) update: CMP $16, R11 BLT less_than_16 LDP.P 16(R13), (R8, R10) CRC32CX R8, R9 CRC32CX R10, R9 SUB $16, R11 JMP update less_than_16: TBZ $3, R11, less_than_8 MOVD.P 8(R13), R10 CRC32CX R10, R9 less_than_8: TBZ $2, R11, less_than_4 MOVWU.P 4(R13), R10 CRC32CW R10, R9 less_than_4: TBZ $1, R11, less_than_2 MOVHU.P 2(R13), R10 CRC32CH R10, R9 less_than_2: TBZ $0, R11, done MOVBU (R13), R10 CRC32CB R10, R9 done: MOVWU R9, ret+32(FP) RET // ieeeUpdate updates the non-inverted crc with the given data. // func ieeeUpdate(crc uint32, p []byte) uint32 TEXT ·ieeeUpdate(SB),NOSPLIT,$0-36 MOVWU crc+0(FP), R9 // CRC value MOVD p+8(FP), R13 // data pointer MOVD p_len+16(FP), R11 // len(p) update: CMP $16, R11 BLT less_than_16 LDP.P 16(R13), (R8, R10) CRC32X R8, R9 CRC32X R10, R9 SUB $16, R11 JMP update less_than_16: TBZ $3, R11, less_than_8 MOVD.P 8(R13), R10 CRC32X R10, R9 less_than_8: TBZ $2, R11, less_than_4 MOVWU.P 4(R13), R10 CRC32W R10, R9 less_than_4: TBZ $1, R11, less_than_2 MOVHU.P 2(R13), R10 CRC32H R10, R9 less_than_2: TBZ $0, R11, done MOVBU (R13), R10 CRC32B R10, R9 done: MOVWU R9, ret+32(FP) RET
go/src/hash/crc32/crc32_arm64.s/0
{ "file_path": "go/src/hash/crc32/crc32_arm64.s", "repo_id": "go", "token_count": 897 }
262
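The assembly above is an internal fast path for hash/crc32; callers only see the exported API. A short usage sketch with documented functions, using arbitrary example data:

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("hello, world")

	// IEEE polynomial, the common default.
	fmt.Printf("IEEE:       %08x\n", crc32.ChecksumIEEE(data))

	// Castagnoli polynomial; on arm64 this can be served by the
	// CRC32C instructions used in castagnoliUpdate.
	tab := crc32.MakeTable(crc32.Castagnoli)
	fmt.Printf("Castagnoli: %08x\n", crc32.Checksum(data, tab))
}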
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fnv import ( "bytes" "encoding" "encoding/binary" "hash" "io" "testing" ) type golden struct { out []byte in string halfState string // marshaled hash state after first half of in written, used by TestGoldenMarshal } var golden32 = []golden{ {[]byte{0x81, 0x1c, 0x9d, 0xc5}, "", "fnv\x01\x81\x1c\x9d\xc5"}, {[]byte{0x05, 0x0c, 0x5d, 0x7e}, "a", "fnv\x01\x81\x1c\x9d\xc5"}, {[]byte{0x70, 0x77, 0x2d, 0x38}, "ab", "fnv\x01\x05\f]~"}, {[]byte{0x43, 0x9c, 0x2f, 0x4b}, "abc", "fnv\x01\x05\f]~"}, } var golden32a = []golden{ {[]byte{0x81, 0x1c, 0x9d, 0xc5}, "", "fnv\x02\x81\x1c\x9d\xc5"}, {[]byte{0xe4, 0x0c, 0x29, 0x2c}, "a", "fnv\x02\x81\x1c\x9d\xc5"}, {[]byte{0x4d, 0x25, 0x05, 0xca}, "ab", "fnv\x02\xe4\f),"}, {[]byte{0x1a, 0x47, 0xe9, 0x0b}, "abc", "fnv\x02\xe4\f),"}, } var golden64 = []golden{ {[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, "", "fnv\x03\xcb\xf2\x9c\xe4\x84\"#%"}, {[]byte{0xaf, 0x63, 0xbd, 0x4c, 0x86, 0x01, 0xb7, 0xbe}, "a", "fnv\x03\xcb\xf2\x9c\xe4\x84\"#%"}, {[]byte{0x08, 0x32, 0x67, 0x07, 0xb4, 0xeb, 0x37, 0xb8}, "ab", "fnv\x03\xafc\xbdL\x86\x01\xb7\xbe"}, {[]byte{0xd8, 0xdc, 0xca, 0x18, 0x6b, 0xaf, 0xad, 0xcb}, "abc", "fnv\x03\xafc\xbdL\x86\x01\xb7\xbe"}, } var golden64a = []golden{ {[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, "", "fnv\x04\xcb\xf2\x9c\xe4\x84\"#%"}, {[]byte{0xaf, 0x63, 0xdc, 0x4c, 0x86, 0x01, 0xec, 0x8c}, "a", "fnv\x04\xcb\xf2\x9c\xe4\x84\"#%"}, {[]byte{0x08, 0x9c, 0x44, 0x07, 0xb5, 0x45, 0x98, 0x6a}, "ab", "fnv\x04\xafc\xdcL\x86\x01\xec\x8c"}, {[]byte{0xe7, 0x1f, 0xa2, 0x19, 0x05, 0x41, 0x57, 0x4b}, "abc", "fnv\x04\xafc\xdcL\x86\x01\xec\x8c"}, } var golden128 = []golden{ {[]byte{0x6c, 0x62, 0x27, 0x2e, 0x07, 0xbb, 0x01, 0x42, 0x62, 0xb8, 0x21, 0x75, 0x62, 0x95, 0xc5, 0x8d}, "", "fnv\x05lb'.\a\xbb\x01Bb\xb8!ub\x95ō"}, {[]byte{0xd2, 0x28, 0xcb, 0x69, 0x10, 0x1a, 0x8c, 0xaf, 0x78, 0x91, 0x2b, 0x70, 0x4e, 0x4a, 0x14, 0x1e}, "a", "fnv\x05lb'.\a\xbb\x01Bb\xb8!ub\x95ō"}, {[]byte{0x8, 0x80, 0x94, 0x5a, 0xee, 0xab, 0x1b, 0xe9, 0x5a, 0xa0, 0x73, 0x30, 0x55, 0x26, 0xc0, 0x88}, "ab", "fnv\x05\xd2(\xcbi\x10\x1a\x8c\xafx\x91+pNJ\x14\x1e"}, {[]byte{0xa6, 0x8b, 0xb2, 0xa4, 0x34, 0x8b, 0x58, 0x22, 0x83, 0x6d, 0xbc, 0x78, 0xc6, 0xae, 0xe7, 0x3b}, "abc", "fnv\x05\xd2(\xcbi\x10\x1a\x8c\xafx\x91+pNJ\x14\x1e"}, } var golden128a = []golden{ {[]byte{0x6c, 0x62, 0x27, 0x2e, 0x07, 0xbb, 0x01, 0x42, 0x62, 0xb8, 0x21, 0x75, 0x62, 0x95, 0xc5, 0x8d}, "", "fnv\x06lb'.\a\xbb\x01Bb\xb8!ub\x95ō"}, {[]byte{0xd2, 0x28, 0xcb, 0x69, 0x6f, 0x1a, 0x8c, 0xaf, 0x78, 0x91, 0x2b, 0x70, 0x4e, 0x4a, 0x89, 0x64}, "a", "fnv\x06lb'.\a\xbb\x01Bb\xb8!ub\x95ō"}, {[]byte{0x08, 0x80, 0x95, 0x44, 0xbb, 0xab, 0x1b, 0xe9, 0x5a, 0xa0, 0x73, 0x30, 0x55, 0xb6, 0x9a, 0x62}, "ab", "fnv\x06\xd2(\xcbio\x1a\x8c\xafx\x91+pNJ\x89d"}, {[]byte{0xa6, 0x8d, 0x62, 0x2c, 0xec, 0x8b, 0x58, 0x22, 0x83, 0x6d, 0xbc, 0x79, 0x77, 0xaf, 0x7f, 0x3b}, "abc", "fnv\x06\xd2(\xcbio\x1a\x8c\xafx\x91+pNJ\x89d"}, } func TestGolden32(t *testing.T) { testGolden(t, New32(), golden32) } func TestGolden32a(t *testing.T) { testGolden(t, New32a(), golden32a) } func TestGolden64(t *testing.T) { testGolden(t, New64(), golden64) } func TestGolden64a(t *testing.T) { testGolden(t, New64a(), golden64a) } func TestGolden128(t *testing.T) { testGolden(t, New128(), golden128) } func TestGolden128a(t *testing.T) { testGolden(t, New128a(), golden128a) } func testGolden(t 
*testing.T, hash hash.Hash, gold []golden) { for _, g := range gold { hash.Reset() done, error := hash.Write([]byte(g.in)) if error != nil { t.Fatalf("write error: %s", error) } if done != len(g.in) { t.Fatalf("wrote only %d out of %d bytes", done, len(g.in)) } if actual := hash.Sum(nil); !bytes.Equal(g.out, actual) { t.Errorf("hash(%q) = 0x%x want 0x%x", g.in, actual, g.out) } } } func TestGoldenMarshal(t *testing.T) { tests := []struct { name string newHash func() hash.Hash gold []golden }{ {"32", func() hash.Hash { return New32() }, golden32}, {"32a", func() hash.Hash { return New32a() }, golden32a}, {"64", func() hash.Hash { return New64() }, golden64}, {"64a", func() hash.Hash { return New64a() }, golden64a}, {"128", func() hash.Hash { return New128() }, golden128}, {"128a", func() hash.Hash { return New128a() }, golden128a}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, g := range tt.gold { h := tt.newHash() h2 := tt.newHash() io.WriteString(h, g.in[:len(g.in)/2]) state, err := h.(encoding.BinaryMarshaler).MarshalBinary() if err != nil { t.Errorf("could not marshal: %v", err) continue } if string(state) != g.halfState { t.Errorf("checksum(%q) state = %q, want %q", g.in, state, g.halfState) continue } if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil { t.Errorf("could not unmarshal: %v", err) continue } io.WriteString(h, g.in[len(g.in)/2:]) io.WriteString(h2, g.in[len(g.in)/2:]) if actual, actual2 := h.Sum(nil), h2.Sum(nil); !bytes.Equal(actual, actual2) { t.Errorf("hash(%q) = 0x%x != marshaled 0x%x", g.in, actual, actual2) } } }) } } func TestIntegrity32(t *testing.T) { testIntegrity(t, New32()) } func TestIntegrity32a(t *testing.T) { testIntegrity(t, New32a()) } func TestIntegrity64(t *testing.T) { testIntegrity(t, New64()) } func TestIntegrity64a(t *testing.T) { testIntegrity(t, New64a()) } func TestIntegrity128(t *testing.T) { testIntegrity(t, New128()) } func TestIntegrity128a(t *testing.T) { testIntegrity(t, New128a()) } func testIntegrity(t *testing.T, h hash.Hash) { data := []byte{'1', '2', 3, 4, 5} h.Write(data) sum := h.Sum(nil) if size := h.Size(); size != len(sum) { t.Fatalf("Size()=%d but len(Sum())=%d", size, len(sum)) } if a := h.Sum(nil); !bytes.Equal(sum, a) { t.Fatalf("first Sum()=0x%x, second Sum()=0x%x", sum, a) } h.Reset() h.Write(data) if a := h.Sum(nil); !bytes.Equal(sum, a) { t.Fatalf("Sum()=0x%x, but after Reset() Sum()=0x%x", sum, a) } h.Reset() h.Write(data[:2]) h.Write(data[2:]) if a := h.Sum(nil); !bytes.Equal(sum, a) { t.Fatalf("Sum()=0x%x, but with partial writes, Sum()=0x%x", sum, a) } switch h.Size() { case 4: sum32 := h.(hash.Hash32).Sum32() if sum32 != binary.BigEndian.Uint32(sum) { t.Fatalf("Sum()=0x%x, but Sum32()=0x%x", sum, sum32) } case 8: sum64 := h.(hash.Hash64).Sum64() if sum64 != binary.BigEndian.Uint64(sum) { t.Fatalf("Sum()=0x%x, but Sum64()=0x%x", sum, sum64) } case 16: // There's no Sum128 function, so we don't need to test anything here. 
} } func BenchmarkFnv32KB(b *testing.B) { benchmarkKB(b, New32()) } func BenchmarkFnv32aKB(b *testing.B) { benchmarkKB(b, New32a()) } func BenchmarkFnv64KB(b *testing.B) { benchmarkKB(b, New64()) } func BenchmarkFnv64aKB(b *testing.B) { benchmarkKB(b, New64a()) } func BenchmarkFnv128KB(b *testing.B) { benchmarkKB(b, New128()) } func BenchmarkFnv128aKB(b *testing.B) { benchmarkKB(b, New128a()) } func benchmarkKB(b *testing.B, h hash.Hash) { b.SetBytes(1024) data := make([]byte, 1024) for i := range data { data[i] = byte(i) } in := make([]byte, 0, h.Size()) b.ResetTimer() for i := 0; i < b.N; i++ { h.Reset() h.Write(data) h.Sum(in) } }
go/src/hash/fnv/fnv_test.go/0
{ "file_path": "go/src/hash/fnv/fnv_test.go", "repo_id": "go", "token_count": 4012 }
263
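The tests above reach hash/fnv only through its exported constructors. A brief usage sketch with documented API, including the state snapshot that TestGoldenMarshal exercises via type assertions:

package main

import (
	"encoding"
	"fmt"
	"hash/fnv"
	"io"
)

func main() {
	h := fnv.New32a()
	io.WriteString(h, "hello, ")

	// Snapshot the hash state partway through, as the marshal test does.
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	h2 := fnv.New32a()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}

	io.WriteString(h, "world")
	io.WriteString(h2, "world")
	fmt.Printf("%08x %08x\n", h.Sum32(), h2.Sum32()) // identical sums
}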
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import "testing" func FuzzEscapeUnescape(f *testing.F) { f.Fuzz(func(t *testing.T, v string) { e := EscapeString(v) u := UnescapeString(e) if u != v { t.Errorf("EscapeString(%q) = %q, UnescapeString(%q) = %q, want %q", v, e, e, u, v) } // As per the documentation, this isn't always equal to v, so it makes // no sense to check for equality. It can still be interesting to find // panics in it though. EscapeString(UnescapeString(v)) }) }
go/src/html/fuzz_test.go/0
{ "file_path": "go/src/html/fuzz_test.go", "repo_id": "go", "token_count": 232 }
264
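The fuzz target checks that UnescapeString inverts EscapeString but not the reverse; the same properties in plain example form, using only the two exported functions:

package main

import (
	"fmt"
	"html"
)

func main() {
	const v = `"Fran & Freddie's Diner" <tasty@example.com>`

	e := html.EscapeString(v)
	u := html.UnescapeString(e)
	fmt.Println(e)
	fmt.Println(u == v) // true: unescaping recovers the original

	// The reverse composition is not an identity: UnescapeString
	// handles many entities that EscapeString never produces.
	fmt.Println(html.EscapeString(html.UnescapeString("&copy;"))) // ©
}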
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package template import ( "bytes" "strings" ) // transitionFunc is the array of context transition functions for text nodes. // A transition function takes a context and template text input, and returns // the updated context and the number of bytes consumed from the front of the // input. var transitionFunc = [...]func(context, []byte) (context, int){ stateText: tText, stateTag: tTag, stateAttrName: tAttrName, stateAfterName: tAfterName, stateBeforeValue: tBeforeValue, stateHTMLCmt: tHTMLCmt, stateRCDATA: tSpecialTagEnd, stateAttr: tAttr, stateURL: tURL, stateSrcset: tURL, stateJS: tJS, stateJSDqStr: tJSDelimited, stateJSSqStr: tJSDelimited, stateJSRegexp: tJSDelimited, stateJSTmplLit: tJSTmpl, stateJSBlockCmt: tBlockCmt, stateJSLineCmt: tLineCmt, stateJSHTMLOpenCmt: tLineCmt, stateJSHTMLCloseCmt: tLineCmt, stateCSS: tCSS, stateCSSDqStr: tCSSStr, stateCSSSqStr: tCSSStr, stateCSSDqURL: tCSSStr, stateCSSSqURL: tCSSStr, stateCSSURL: tCSSStr, stateCSSBlockCmt: tBlockCmt, stateCSSLineCmt: tLineCmt, stateError: tError, } var commentStart = []byte("<!--") var commentEnd = []byte("-->") // tText is the context transition function for the text state. func tText(c context, s []byte) (context, int) { k := 0 for { i := k + bytes.IndexByte(s[k:], '<') if i < k || i+1 == len(s) { return c, len(s) } else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) { return context{state: stateHTMLCmt}, i + 4 } i++ end := false if s[i] == '/' { if i+1 == len(s) { return c, len(s) } end, i = true, i+1 } j, e := eatTagName(s, i) if j != i { if end { e = elementNone } // We've found an HTML tag. return context{state: stateTag, element: e}, j } k = j } } var elementContentType = [...]state{ elementNone: stateText, elementScript: stateJS, elementStyle: stateCSS, elementTextarea: stateRCDATA, elementTitle: stateRCDATA, } // tTag is the context transition function for the tag state. func tTag(c context, s []byte) (context, int) { // Find the attribute name. i := eatWhiteSpace(s, 0) if i == len(s) { return c, len(s) } if s[i] == '>' { return context{ state: elementContentType[c.element], element: c.element, }, i + 1 } j, err := eatAttrName(s, i) if err != nil { return context{state: stateError, err: err}, len(s) } state, attr := stateTag, attrNone if i == j { return context{ state: stateError, err: errorf(ErrBadHTML, nil, 0, "expected space, attr name, or end of tag, but got %q", s[i:]), }, len(s) } attrName := strings.ToLower(string(s[i:j])) if c.element == elementScript && attrName == "type" { attr = attrScriptType } else { switch attrType(attrName) { case contentTypeURL: attr = attrURL case contentTypeCSS: attr = attrStyle case contentTypeJS: attr = attrScript case contentTypeSrcset: attr = attrSrcset } } if j == len(s) { state = stateAttrName } else { state = stateAfterName } return context{state: state, element: c.element, attr: attr}, j } // tAttrName is the context transition function for stateAttrName. func tAttrName(c context, s []byte) (context, int) { i, err := eatAttrName(s, 0) if err != nil { return context{state: stateError, err: err}, len(s) } else if i != len(s) { c.state = stateAfterName } return c, i } // tAfterName is the context transition function for stateAfterName. func tAfterName(c context, s []byte) (context, int) { // Look for the start of the value. 
i := eatWhiteSpace(s, 0) if i == len(s) { return c, len(s) } else if s[i] != '=' { // Occurs due to tag ending '>', and valueless attribute. c.state = stateTag return c, i } c.state = stateBeforeValue // Consume the "=". return c, i + 1 } var attrStartStates = [...]state{ attrNone: stateAttr, attrScript: stateJS, attrScriptType: stateAttr, attrStyle: stateCSS, attrURL: stateURL, attrSrcset: stateSrcset, } // tBeforeValue is the context transition function for stateBeforeValue. func tBeforeValue(c context, s []byte) (context, int) { i := eatWhiteSpace(s, 0) if i == len(s) { return c, len(s) } // Find the attribute delimiter. delim := delimSpaceOrTagEnd switch s[i] { case '\'': delim, i = delimSingleQuote, i+1 case '"': delim, i = delimDoubleQuote, i+1 } c.state, c.delim = attrStartStates[c.attr], delim return c, i } // tHTMLCmt is the context transition function for stateHTMLCmt. func tHTMLCmt(c context, s []byte) (context, int) { if i := bytes.Index(s, commentEnd); i != -1 { return context{}, i + 3 } return c, len(s) } // specialTagEndMarkers maps element types to the character sequence that // case-insensitively signals the end of the special tag body. var specialTagEndMarkers = [...][]byte{ elementScript: []byte("script"), elementStyle: []byte("style"), elementTextarea: []byte("textarea"), elementTitle: []byte("title"), } var ( specialTagEndPrefix = []byte("</") tagEndSeparators = []byte("> \t\n\f/") ) // tSpecialTagEnd is the context transition function for raw text and RCDATA // element states. func tSpecialTagEnd(c context, s []byte) (context, int) { if c.element != elementNone { // script end tags ("</script") within script literals are ignored, so that // we can properly escape them. if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) { return c, len(s) } if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 { return context{}, i } } return c, len(s) } // indexTagEnd finds the index of a special tag end in a case insensitive way, or returns -1 func indexTagEnd(s []byte, tag []byte) int { res := 0 plen := len(specialTagEndPrefix) for len(s) > 0 { // Try to find the tag end prefix first i := bytes.Index(s, specialTagEndPrefix) if i == -1 { return i } s = s[i+plen:] // Try to match the actual tag if there is still space for it if len(tag) <= len(s) && bytes.EqualFold(tag, s[:len(tag)]) { s = s[len(tag):] // Check the tag is followed by a proper separator if len(s) > 0 && bytes.IndexByte(tagEndSeparators, s[0]) != -1 { return res + i } res += len(tag) } res += i + plen } return -1 } // tAttr is the context transition function for the attribute state. func tAttr(c context, s []byte) (context, int) { return c, len(s) } // tURL is the context transition function for the URL state. func tURL(c context, s []byte) (context, int) { if bytes.ContainsAny(s, "#?") { c.urlPart = urlPartQueryOrFrag } else if len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone { // HTML5 uses "Valid URL potentially surrounded by spaces" for // attrs: https://www.w3.org/TR/html5/index.html#attributes-1 c.urlPart = urlPartPreQuery } return c, len(s) } // tJS is the context transition function for the JS state. func tJS(c context, s []byte) (context, int) { i := bytes.IndexAny(s, "\"`'/{}<-#") if i == -1 { // Entire input is non string, comment, regexp tokens. 
c.jsCtx = nextJSCtx(s, c.jsCtx) return c, len(s) } c.jsCtx = nextJSCtx(s[:i], c.jsCtx) switch s[i] { case '"': c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp case '\'': c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp case '`': c.state, c.jsCtx = stateJSTmplLit, jsCtxRegexp case '/': switch { case i+1 < len(s) && s[i+1] == '/': c.state, i = stateJSLineCmt, i+1 case i+1 < len(s) && s[i+1] == '*': c.state, i = stateJSBlockCmt, i+1 case c.jsCtx == jsCtxRegexp: c.state = stateJSRegexp case c.jsCtx == jsCtxDivOp: c.jsCtx = jsCtxRegexp default: return context{ state: stateError, err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]), }, len(s) } // ECMAScript supports HTML style comments for legacy reasons, see Appendix // B.1.1 "HTML-like Comments". The handling of these comments is somewhat // confusing. Multi-line comments are not supported, i.e. anything on lines // between the opening and closing tokens is not considered a comment, but // anything following the opening or closing token, on the same line, is // ignored. As such we simply treat any line prefixed with "<!--" or "-->" // as if it were actually prefixed with "//" and move on. case '<': if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) { c.state, i = stateJSHTMLOpenCmt, i+3 } case '-': if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) { c.state, i = stateJSHTMLCloseCmt, i+2 } // ECMAScript also supports "hashbang" comment lines, see Section 12.5. case '#': if i+1 < len(s) && s[i+1] == '!' { c.state, i = stateJSLineCmt, i+1 } case '{': // We only care about tracking brace depth if we are inside of a // template literal. if len(c.jsBraceDepth) == 0 { return c, i + 1 } c.jsBraceDepth[len(c.jsBraceDepth)-1]++ case '}': if len(c.jsBraceDepth) == 0 { return c, i + 1 } // There are no cases where a brace can be escaped in the JS context // that are not syntax errors, it seems. Because of this we can just // count "\}" as "}" and move on, the script is already broken as // fully fledged parsers will just fail anyway. c.jsBraceDepth[len(c.jsBraceDepth)-1]-- if c.jsBraceDepth[len(c.jsBraceDepth)-1] >= 0 { return c, i + 1 } c.jsBraceDepth = c.jsBraceDepth[:len(c.jsBraceDepth)-1] c.state = stateJSTmplLit default: panic("unreachable") } return c, i + 1 } func tJSTmpl(c context, s []byte) (context, int) { var k int for { i := k + bytes.IndexAny(s[k:], "`\\$") if i < k { break } switch s[i] { case '\\': i++ if i == len(s) { return context{ state: stateError, err: errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s), }, len(s) } case '$': if len(s) >= i+2 && s[i+1] == '{' { c.jsBraceDepth = append(c.jsBraceDepth, 0) c.state = stateJS return c, i + 2 } case '`': // end c.state = stateJS return c, i + 1 } k = i + 1 } return c, len(s) } // tJSDelimited is the context transition function for the JS string and regexp // states. func tJSDelimited(c context, s []byte) (context, int) { specials := `\"` switch c.state { case stateJSSqStr: specials = `\'` case stateJSRegexp: specials = `\/[]` } k, inCharset := 0, false for { i := k + bytes.IndexAny(s[k:], specials) if i < k { break } switch s[i] { case '\\': i++ if i == len(s) { return context{ state: stateError, err: errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s), }, len(s) } case '[': inCharset = true case ']': inCharset = false case '/': // If "</script" appears in a regex literal, the '/' should not // close the regex literal, and it will later be escaped to // "\x3C/script" in escapeText. 
if i > 0 && i+7 <= len(s) && bytes.Equal(bytes.ToLower(s[i-1:i+7]), []byte("</script")) { i++ } else if !inCharset { c.state, c.jsCtx = stateJS, jsCtxDivOp return c, i + 1 } default: // end delimiter if !inCharset { c.state, c.jsCtx = stateJS, jsCtxDivOp return c, i + 1 } } k = i + 1 } if inCharset { // This can be fixed by making context richer if interpolation // into charsets is desired. return context{ state: stateError, err: errorf(ErrPartialCharset, nil, 0, "unfinished JS regexp charset: %q", s), }, len(s) } return c, len(s) } var blockCommentEnd = []byte("*/") // tBlockCmt is the context transition function for /*comment*/ states. func tBlockCmt(c context, s []byte) (context, int) { i := bytes.Index(s, blockCommentEnd) if i == -1 { return c, len(s) } switch c.state { case stateJSBlockCmt: c.state = stateJS case stateCSSBlockCmt: c.state = stateCSS default: panic(c.state.String()) } return c, i + 2 } // tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state. func tLineCmt(c context, s []byte) (context, int) { var lineTerminators string var endState state switch c.state { case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt: lineTerminators, endState = "\n\r\u2028\u2029", stateJS case stateCSSLineCmt: lineTerminators, endState = "\n\f\r", stateCSS // Line comments are not part of any published CSS standard but // are supported by the 4 major browsers. // This defines line comments as // LINECOMMENT ::= "//" [^\n\f\d]* // since https://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines // newlines: // nl ::= #xA | #xD #xA | #xD | #xC default: panic(c.state.String()) } i := bytes.IndexAny(s, lineTerminators) if i == -1 { return c, len(s) } c.state = endState // Per section 7.4 of EcmaScript 5 : https://es5.github.io/#x7.4 // "However, the LineTerminator at the end of the line is not // considered to be part of the single-line comment; it is // recognized separately by the lexical grammar and becomes part // of the stream of input elements for the syntactic grammar." return c, i } // tCSS is the context transition function for the CSS state. func tCSS(c context, s []byte) (context, int) { // CSS quoted strings are almost never used except for: // (1) URLs as in background: "/foo.png" // (2) Multiword font-names as in font-family: "Times New Roman" // (3) List separators in content values as in inline-lists: // <style> // ul.inlineList { list-style: none; padding:0 } // ul.inlineList > li { display: inline } // ul.inlineList > li:before { content: ", " } // ul.inlineList > li:first-child:before { content: "" } // </style> // <ul class=inlineList><li>One<li>Two<li>Three</ul> // (4) Attribute value selectors as in a[href="http://example.com/"] // // We conservatively treat all strings as URLs, but make some // allowances to avoid confusion. // // In (1), our conservative assumption is justified. // In (2), valid font names do not contain ':', '?', or '#', so our // conservative assumption is fine since we will never transition past // urlPartPreQuery. // In (3), our protocol heuristic should not be tripped, and there // should not be non-space content after a '?' or '#', so as long as // we only %-encode RFC 3986 reserved characters we are ok. // In (4), we should URL escape for URL attributes, and for others we // have the attribute name available if our conservative assumption // proves problematic for real code. 
k := 0 for { i := k + bytes.IndexAny(s[k:], `("'/`) if i < k { return c, len(s) } switch s[i] { case '(': // Look for url to the left. p := bytes.TrimRight(s[:i], "\t\n\f\r ") if endsWithCSSKeyword(p, "url") { j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r ")) switch { case j != len(s) && s[j] == '"': c.state, j = stateCSSDqURL, j+1 case j != len(s) && s[j] == '\'': c.state, j = stateCSSSqURL, j+1 default: c.state = stateCSSURL } return c, j } case '/': if i+1 < len(s) { switch s[i+1] { case '/': c.state = stateCSSLineCmt return c, i + 2 case '*': c.state = stateCSSBlockCmt return c, i + 2 } } case '"': c.state = stateCSSDqStr return c, i + 1 case '\'': c.state = stateCSSSqStr return c, i + 1 } k = i + 1 } } // tCSSStr is the context transition function for the CSS string and URL states. func tCSSStr(c context, s []byte) (context, int) { var endAndEsc string switch c.state { case stateCSSDqStr, stateCSSDqURL: endAndEsc = `\"` case stateCSSSqStr, stateCSSSqURL: endAndEsc = `\'` case stateCSSURL: // Unquoted URLs end with a newline or close parenthesis. // The below includes the wc (whitespace character) and nl. endAndEsc = "\\\t\n\f\r )" default: panic(c.state.String()) } k := 0 for { i := k + bytes.IndexAny(s[k:], endAndEsc) if i < k { c, nread := tURL(c, decodeCSS(s[k:])) return c, k + nread } if s[i] == '\\' { i++ if i == len(s) { return context{ state: stateError, err: errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in CSS string: %q", s), }, len(s) } } else { c.state = stateCSS return c, i + 1 } c, _ = tURL(c, decodeCSS(s[:i+1])) k = i + 1 } } // tError is the context transition function for the error state. func tError(c context, s []byte) (context, int) { return c, len(s) } // eatAttrName returns the largest j such that s[i:j] is an attribute name. // It returns an error if s[i:] does not look like it begins with an // attribute name, such as encountering a quote mark without a preceding // equals sign. func eatAttrName(s []byte, i int) (int, *Error) { for j := i; j < len(s); j++ { switch s[j] { case ' ', '\t', '\n', '\f', '\r', '=', '>': return j, nil case '\'', '"', '<': // These result in a parse warning in HTML5 and are // indicative of serious problems if seen in an attr // name in a template. return -1, errorf(ErrBadHTML, nil, 0, "%q in attribute name: %.32q", s[j:j+1], s) default: // No-op. } } return len(s), nil } var elementNameMap = map[string]element{ "script": elementScript, "style": elementStyle, "textarea": elementTextarea, "title": elementTitle, } // asciiAlpha reports whether c is an ASCII letter. func asciiAlpha(c byte) bool { return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' } // asciiAlphaNum reports whether c is an ASCII letter or digit. func asciiAlphaNum(c byte) bool { return asciiAlpha(c) || '0' <= c && c <= '9' } // eatTagName returns the largest j such that s[i:j] is a tag name and the tag type. func eatTagName(s []byte, i int) (int, element) { if i == len(s) || !asciiAlpha(s[i]) { return i, elementNone } j := i + 1 for j < len(s) { x := s[j] if asciiAlphaNum(x) { j++ continue } // Allow "x-y" or "x:y" but not "x-", "-y", or "x--y". if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) { j += 2 continue } break } return j, elementNameMap[strings.ToLower(string(s[i:j]))] } // eatWhiteSpace returns the largest j such that s[i:j] is white space. func eatWhiteSpace(s []byte, i int) int { for j := i; j < len(s); j++ { switch s[j] { case ' ', '\t', '\n', '\f', '\r': // No-op. default: return j } } return len(s) }
go/src/html/template/transition.go/0
{ "file_path": "go/src/html/template/transition.go", "repo_id": "go", "token_count": 7802 }
265
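The transition functions above drive html/template's context tracking; from the outside this appears as context-dependent escaping. A small sketch with the public API only (template text and data are arbitrary examples):

package main

import (
	"html/template"
	"os"
)

func main() {
	// The same value is escaped one way inside the URL attribute
	// (stateURL), another inside the script element (stateJS), and
	// another in ordinary text (stateText).
	const page = `<a href="/search?q={{.}}">{{.}}</a>
<script>var q = {{.}};</script>
`

	t := template.Must(template.New("page").Parse(page))
	if err := t.Execute(os.Stdout, `O'Reilly & Sons <script>`); err != nil {
		panic(err)
	}
}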
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package draw provides image composition functions. // // See "The Go image/draw package" for an introduction to this package: // https://golang.org/doc/articles/image_draw.html package draw import ( "image" "image/color" "image/internal/imageutil" ) // m is the maximum color value returned by image.Color.RGBA. const m = 1<<16 - 1 // Image is an image.Image with a Set method to change a single pixel. type Image interface { image.Image Set(x, y int, c color.Color) } // RGBA64Image extends both the [Image] and [image.RGBA64Image] interfaces with a // SetRGBA64 method to change a single pixel. SetRGBA64 is equivalent to // calling Set, but it can avoid allocations from converting concrete color // types to the [color.Color] interface type. type RGBA64Image interface { image.RGBA64Image Set(x, y int, c color.Color) SetRGBA64(x, y int, c color.RGBA64) } // Quantizer produces a palette for an image. type Quantizer interface { // Quantize appends up to cap(p) - len(p) colors to p and returns the // updated palette suitable for converting m to a paletted image. Quantize(p color.Palette, m image.Image) color.Palette } // Op is a Porter-Duff compositing operator. type Op int const ( // Over specifies ``(src in mask) over dst''. Over Op = iota // Src specifies ``src in mask''. Src ) // Draw implements the [Drawer] interface by calling the Draw function with this // [Op]. func (op Op) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { DrawMask(dst, r, src, sp, nil, image.Point{}, op) } // Drawer contains the [Draw] method. type Drawer interface { // Draw aligns r.Min in dst with sp in src and then replaces the // rectangle r in dst with the result of drawing src on dst. Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) } // FloydSteinberg is a [Drawer] that is the [Src] [Op] with Floyd-Steinberg error // diffusion. var FloydSteinberg Drawer = floydSteinberg{} type floydSteinberg struct{} func (floydSteinberg) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { clip(dst, &r, src, &sp, nil, nil) if r.Empty() { return } drawPaletted(dst, r, src, sp, true) } // clip clips r against each image's bounds (after translating into the // destination image's coordinate space) and shifts the points sp and mp by // the same amount as the change in r.Min. func clip(dst Image, r *image.Rectangle, src image.Image, sp *image.Point, mask image.Image, mp *image.Point) { orig := r.Min *r = r.Intersect(dst.Bounds()) *r = r.Intersect(src.Bounds().Add(orig.Sub(*sp))) if mask != nil { *r = r.Intersect(mask.Bounds().Add(orig.Sub(*mp))) } dx := r.Min.X - orig.X dy := r.Min.Y - orig.Y if dx == 0 && dy == 0 { return } sp.X += dx sp.Y += dy if mp != nil { mp.X += dx mp.Y += dy } } func processBackward(dst image.Image, r image.Rectangle, src image.Image, sp image.Point) bool { return dst == src && r.Overlaps(r.Add(sp.Sub(r.Min))) && (sp.Y < r.Min.Y || (sp.Y == r.Min.Y && sp.X < r.Min.X)) } // Draw calls [DrawMask] with a nil mask. func Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op) { DrawMask(dst, r, src, sp, nil, image.Point{}, op) } // DrawMask aligns r.Min in dst with sp in src and mp in mask and then replaces the rectangle r // in dst with the result of a Porter-Duff composition. A nil mask is treated as opaque. 
func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { clip(dst, &r, src, &sp, mask, &mp) if r.Empty() { return } // Fast paths for special cases. If none of them apply, then we fall back // to general but slower implementations. // // For NRGBA and NRGBA64 image types, the code paths aren't just faster. // They also avoid the information loss that would otherwise occur from // converting non-alpha-premultiplied color to and from alpha-premultiplied // color. See TestDrawSrcNonpremultiplied. switch dst0 := dst.(type) { case *image.RGBA: if op == Over { if mask == nil { switch src0 := src.(type) { case *image.Uniform: sr, sg, sb, sa := src0.RGBA() if sa == 0xffff { drawFillSrc(dst0, r, sr, sg, sb, sa) } else { drawFillOver(dst0, r, sr, sg, sb, sa) } return case *image.RGBA: drawCopyOver(dst0, r, src0, sp) return case *image.NRGBA: drawNRGBAOver(dst0, r, src0, sp) return case *image.YCbCr: // An image.YCbCr is always fully opaque, and so if the // mask is nil (i.e. fully opaque) then the op is // effectively always Src. Similarly for image.Gray and // image.CMYK. if imageutil.DrawYCbCr(dst0, r, src0, sp) { return } case *image.Gray: drawGray(dst0, r, src0, sp) return case *image.CMYK: drawCMYK(dst0, r, src0, sp) return } } else if mask0, ok := mask.(*image.Alpha); ok { switch src0 := src.(type) { case *image.Uniform: drawGlyphOver(dst0, r, src0, mask0, mp) return case *image.RGBA: drawRGBAMaskOver(dst0, r, src0, sp, mask0, mp) return case *image.Gray: drawGrayMaskOver(dst0, r, src0, sp, mask0, mp) return // Case order matters. The next case (image.RGBA64Image) is an // interface type that the concrete types above also implement. case image.RGBA64Image: drawRGBA64ImageMaskOver(dst0, r, src0, sp, mask0, mp) return } } } else { if mask == nil { switch src0 := src.(type) { case *image.Uniform: sr, sg, sb, sa := src0.RGBA() drawFillSrc(dst0, r, sr, sg, sb, sa) return case *image.RGBA: d0 := dst0.PixOffset(r.Min.X, r.Min.Y) s0 := src0.PixOffset(sp.X, sp.Y) drawCopySrc( dst0.Pix[d0:], dst0.Stride, r, src0.Pix[s0:], src0.Stride, sp, 4*r.Dx()) return case *image.NRGBA: drawNRGBASrc(dst0, r, src0, sp) return case *image.YCbCr: if imageutil.DrawYCbCr(dst0, r, src0, sp) { return } case *image.Gray: drawGray(dst0, r, src0, sp) return case *image.CMYK: drawCMYK(dst0, r, src0, sp) return } } } drawRGBA(dst0, r, src, sp, mask, mp, op) return case *image.Paletted: if op == Src && mask == nil { if src0, ok := src.(*image.Uniform); ok { colorIndex := uint8(dst0.Palette.Index(src0.C)) i0 := dst0.PixOffset(r.Min.X, r.Min.Y) i1 := i0 + r.Dx() for i := i0; i < i1; i++ { dst0.Pix[i] = colorIndex } firstRow := dst0.Pix[i0:i1] for y := r.Min.Y + 1; y < r.Max.Y; y++ { i0 += dst0.Stride i1 += dst0.Stride copy(dst0.Pix[i0:i1], firstRow) } return } else if !processBackward(dst, r, src, sp) { drawPaletted(dst0, r, src, sp, false) return } } case *image.NRGBA: if op == Src && mask == nil { if src0, ok := src.(*image.NRGBA); ok { d0 := dst0.PixOffset(r.Min.X, r.Min.Y) s0 := src0.PixOffset(sp.X, sp.Y) drawCopySrc( dst0.Pix[d0:], dst0.Stride, r, src0.Pix[s0:], src0.Stride, sp, 4*r.Dx()) return } } case *image.NRGBA64: if op == Src && mask == nil { if src0, ok := src.(*image.NRGBA64); ok { d0 := dst0.PixOffset(r.Min.X, r.Min.Y) s0 := src0.PixOffset(sp.X, sp.Y) drawCopySrc( dst0.Pix[d0:], dst0.Stride, r, src0.Pix[s0:], src0.Stride, sp, 8*r.Dx()) return } } } x0, x1, dx := r.Min.X, r.Max.X, 1 y0, y1, dy := r.Min.Y, r.Max.Y, 1 if processBackward(dst, r, src, sp) { 
x0, x1, dx = x1-1, x0-1, -1 y0, y1, dy = y1-1, y0-1, -1 } // FALLBACK1.17 // // Try the draw.RGBA64Image and image.RGBA64Image interfaces, part of the // standard library since Go 1.17. These are like the draw.Image and // image.Image interfaces but they can avoid allocations from converting // concrete color types to the color.Color interface type. if dst0, _ := dst.(RGBA64Image); dst0 != nil { if src0, _ := src.(image.RGBA64Image); src0 != nil { if mask == nil { sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { sx := sp.X + x0 - r.Min.X mx := mp.X + x0 - r.Min.X for x := x0; x != x1; x, sx, mx = x+dx, sx+dx, mx+dx { if op == Src { dst0.SetRGBA64(x, y, src0.RGBA64At(sx, sy)) } else { srgba := src0.RGBA64At(sx, sy) a := m - uint32(srgba.A) drgba := dst0.RGBA64At(x, y) dst0.SetRGBA64(x, y, color.RGBA64{ R: uint16((uint32(drgba.R)*a)/m) + srgba.R, G: uint16((uint32(drgba.G)*a)/m) + srgba.G, B: uint16((uint32(drgba.B)*a)/m) + srgba.B, A: uint16((uint32(drgba.A)*a)/m) + srgba.A, }) } } } return } else if mask0, _ := mask.(image.RGBA64Image); mask0 != nil { sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { sx := sp.X + x0 - r.Min.X mx := mp.X + x0 - r.Min.X for x := x0; x != x1; x, sx, mx = x+dx, sx+dx, mx+dx { ma := uint32(mask0.RGBA64At(mx, my).A) switch { case ma == 0: if op == Over { // No-op. } else { dst0.SetRGBA64(x, y, color.RGBA64{}) } case ma == m && op == Src: dst0.SetRGBA64(x, y, src0.RGBA64At(sx, sy)) default: srgba := src0.RGBA64At(sx, sy) if op == Over { drgba := dst0.RGBA64At(x, y) a := m - (uint32(srgba.A) * ma / m) dst0.SetRGBA64(x, y, color.RGBA64{ R: uint16((uint32(drgba.R)*a + uint32(srgba.R)*ma) / m), G: uint16((uint32(drgba.G)*a + uint32(srgba.G)*ma) / m), B: uint16((uint32(drgba.B)*a + uint32(srgba.B)*ma) / m), A: uint16((uint32(drgba.A)*a + uint32(srgba.A)*ma) / m), }) } else { dst0.SetRGBA64(x, y, color.RGBA64{ R: uint16(uint32(srgba.R) * ma / m), G: uint16(uint32(srgba.G) * ma / m), B: uint16(uint32(srgba.B) * ma / m), A: uint16(uint32(srgba.A) * ma / m), }) } } } } return } } } // FALLBACK1.0 // // If none of the faster code paths above apply, use the draw.Image and // image.Image interfaces, part of the standard library since Go 1.0. var out color.RGBA64 sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { sx := sp.X + x0 - r.Min.X mx := mp.X + x0 - r.Min.X for x := x0; x != x1; x, sx, mx = x+dx, sx+dx, mx+dx { ma := uint32(m) if mask != nil { _, _, _, ma = mask.At(mx, my).RGBA() } switch { case ma == 0: if op == Over { // No-op. } else { dst.Set(x, y, color.Transparent) } case ma == m && op == Src: dst.Set(x, y, src.At(sx, sy)) default: sr, sg, sb, sa := src.At(sx, sy).RGBA() if op == Over { dr, dg, db, da := dst.At(x, y).RGBA() a := m - (sa * ma / m) out.R = uint16((dr*a + sr*ma) / m) out.G = uint16((dg*a + sg*ma) / m) out.B = uint16((db*a + sb*ma) / m) out.A = uint16((da*a + sa*ma) / m) } else { out.R = uint16(sr * ma / m) out.G = uint16(sg * ma / m) out.B = uint16(sb * ma / m) out.A = uint16(sa * ma / m) } // The third argument is &out instead of out (and out is // declared outside of the inner loop) to avoid the implicit // conversion to color.Color here allocating memory in the // inner loop if sizeof(color.RGBA64) > sizeof(uintptr). dst.Set(x, y, &out) } } } } func drawFillOver(dst *image.RGBA, r image.Rectangle, sr, sg, sb, sa uint32) { // The 0x101 is here for the same reason as in drawRGBA. 
a := (m - sa) * 0x101 i0 := dst.PixOffset(r.Min.X, r.Min.Y) i1 := i0 + r.Dx()*4 for y := r.Min.Y; y != r.Max.Y; y++ { for i := i0; i < i1; i += 4 { dr := &dst.Pix[i+0] dg := &dst.Pix[i+1] db := &dst.Pix[i+2] da := &dst.Pix[i+3] *dr = uint8((uint32(*dr)*a/m + sr) >> 8) *dg = uint8((uint32(*dg)*a/m + sg) >> 8) *db = uint8((uint32(*db)*a/m + sb) >> 8) *da = uint8((uint32(*da)*a/m + sa) >> 8) } i0 += dst.Stride i1 += dst.Stride } } func drawFillSrc(dst *image.RGBA, r image.Rectangle, sr, sg, sb, sa uint32) { sr8 := uint8(sr >> 8) sg8 := uint8(sg >> 8) sb8 := uint8(sb >> 8) sa8 := uint8(sa >> 8) // The built-in copy function is faster than a straightforward for loop to fill the destination with // the color, but copy requires a slice source. We therefore use a for loop to fill the first row, and // then use the first row as the slice source for the remaining rows. i0 := dst.PixOffset(r.Min.X, r.Min.Y) i1 := i0 + r.Dx()*4 for i := i0; i < i1; i += 4 { dst.Pix[i+0] = sr8 dst.Pix[i+1] = sg8 dst.Pix[i+2] = sb8 dst.Pix[i+3] = sa8 } firstRow := dst.Pix[i0:i1] for y := r.Min.Y + 1; y < r.Max.Y; y++ { i0 += dst.Stride i1 += dst.Stride copy(dst.Pix[i0:i1], firstRow) } } func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) { dx, dy := r.Dx(), r.Dy() d0 := dst.PixOffset(r.Min.X, r.Min.Y) s0 := src.PixOffset(sp.X, sp.Y) var ( ddelta, sdelta int i0, i1, idelta int ) if r.Min.Y < sp.Y || r.Min.Y == sp.Y && r.Min.X <= sp.X { ddelta = dst.Stride sdelta = src.Stride i0, i1, idelta = 0, dx*4, +4 } else { // If the source start point is higher than the destination start point, or equal height but to the left, // then we compose the rows in right-to-left, bottom-up order instead of left-to-right, top-down. d0 += (dy - 1) * dst.Stride s0 += (dy - 1) * src.Stride ddelta = -dst.Stride sdelta = -src.Stride i0, i1, idelta = (dx-1)*4, -4, -4 } for ; dy > 0; dy-- { dpix := dst.Pix[d0:] spix := src.Pix[s0:] for i := i0; i != i1; i += idelta { s := spix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 sr := uint32(s[0]) * 0x101 sg := uint32(s[1]) * 0x101 sb := uint32(s[2]) * 0x101 sa := uint32(s[3]) * 0x101 // The 0x101 is here for the same reason as in drawRGBA. a := (m - sa) * 0x101 d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 d[0] = uint8((uint32(d[0])*a/m + sr) >> 8) d[1] = uint8((uint32(d[1])*a/m + sg) >> 8) d[2] = uint8((uint32(d[2])*a/m + sb) >> 8) d[3] = uint8((uint32(d[3])*a/m + sa) >> 8) } d0 += ddelta s0 += sdelta } } // drawCopySrc copies bytes to dstPix from srcPix. These arguments roughly // correspond to the Pix fields of the image package's concrete image.Image // implementations, but are offset (dstPix is dst.Pix[dpOffset:] not dst.Pix). func drawCopySrc( dstPix []byte, dstStride int, r image.Rectangle, srcPix []byte, srcStride int, sp image.Point, bytesPerRow int) { d0, s0, ddelta, sdelta, dy := 0, 0, dstStride, srcStride, r.Dy() if r.Min.Y > sp.Y { // If the source start point is higher than the destination start // point, then we compose the rows in bottom-up order instead of // top-down. Unlike the drawCopyOver function, we don't have to check // the x coordinates because the built-in copy function can handle // overlapping slices. 
d0 = (dy - 1) * dstStride s0 = (dy - 1) * srcStride ddelta = -dstStride sdelta = -srcStride } for ; dy > 0; dy-- { copy(dstPix[d0:d0+bytesPerRow], srcPix[s0:s0+bytesPerRow]) d0 += ddelta s0 += sdelta } } func drawNRGBAOver(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image.Point) { i0 := (r.Min.X - dst.Rect.Min.X) * 4 i1 := (r.Max.X - dst.Rect.Min.X) * 4 si0 := (sp.X - src.Rect.Min.X) * 4 yMax := r.Max.Y - dst.Rect.Min.Y y := r.Min.Y - dst.Rect.Min.Y sy := sp.Y - src.Rect.Min.Y for ; y != yMax; y, sy = y+1, sy+1 { dpix := dst.Pix[y*dst.Stride:] spix := src.Pix[sy*src.Stride:] for i, si := i0, si0; i < i1; i, si = i+4, si+4 { // Convert from non-premultiplied color to pre-multiplied color. s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 sa := uint32(s[3]) * 0x101 sr := uint32(s[0]) * sa / 0xff sg := uint32(s[1]) * sa / 0xff sb := uint32(s[2]) * sa / 0xff d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) // The 0x101 is here for the same reason as in drawRGBA. a := (m - sa) * 0x101 d[0] = uint8((dr*a/m + sr) >> 8) d[1] = uint8((dg*a/m + sg) >> 8) d[2] = uint8((db*a/m + sb) >> 8) d[3] = uint8((da*a/m + sa) >> 8) } } } func drawNRGBASrc(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image.Point) { i0 := (r.Min.X - dst.Rect.Min.X) * 4 i1 := (r.Max.X - dst.Rect.Min.X) * 4 si0 := (sp.X - src.Rect.Min.X) * 4 yMax := r.Max.Y - dst.Rect.Min.Y y := r.Min.Y - dst.Rect.Min.Y sy := sp.Y - src.Rect.Min.Y for ; y != yMax; y, sy = y+1, sy+1 { dpix := dst.Pix[y*dst.Stride:] spix := src.Pix[sy*src.Stride:] for i, si := i0, si0; i < i1; i, si = i+4, si+4 { // Convert from non-premultiplied color to pre-multiplied color. 
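			// The source pixel is non-premultiplied, so R, G and B are each
			// scaled by alpha below: r' = r * a / 0xff. Multiplying the 8-bit
			// alpha by 0x101 widens it to 16 bits first (0xff*0x101 == 0xffff),
			// which keeps sr, sg, sb and sa in the [0, 0xffff] range used elsewhere.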
s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 sa := uint32(s[3]) * 0x101 sr := uint32(s[0]) * sa / 0xff sg := uint32(s[1]) * sa / 0xff sb := uint32(s[2]) * sa / 0xff d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 d[0] = uint8(sr >> 8) d[1] = uint8(sg >> 8) d[2] = uint8(sb >> 8) d[3] = uint8(sa >> 8) } } } func drawGray(dst *image.RGBA, r image.Rectangle, src *image.Gray, sp image.Point) { i0 := (r.Min.X - dst.Rect.Min.X) * 4 i1 := (r.Max.X - dst.Rect.Min.X) * 4 si0 := (sp.X - src.Rect.Min.X) * 1 yMax := r.Max.Y - dst.Rect.Min.Y y := r.Min.Y - dst.Rect.Min.Y sy := sp.Y - src.Rect.Min.Y for ; y != yMax; y, sy = y+1, sy+1 { dpix := dst.Pix[y*dst.Stride:] spix := src.Pix[sy*src.Stride:] for i, si := i0, si0; i < i1; i, si = i+4, si+1 { p := spix[si] d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 d[0] = p d[1] = p d[2] = p d[3] = 255 } } } func drawCMYK(dst *image.RGBA, r image.Rectangle, src *image.CMYK, sp image.Point) { i0 := (r.Min.X - dst.Rect.Min.X) * 4 i1 := (r.Max.X - dst.Rect.Min.X) * 4 si0 := (sp.X - src.Rect.Min.X) * 4 yMax := r.Max.Y - dst.Rect.Min.Y y := r.Min.Y - dst.Rect.Min.Y sy := sp.Y - src.Rect.Min.Y for ; y != yMax; y, sy = y+1, sy+1 { dpix := dst.Pix[y*dst.Stride:] spix := src.Pix[sy*src.Stride:] for i, si := i0, si0; i < i1; i, si = i+4, si+4 { s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 d := dpix[i : i+4 : i+4] d[0], d[1], d[2] = color.CMYKToRGB(s[0], s[1], s[2], s[3]) d[3] = 255 } } } func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform, mask *image.Alpha, mp image.Point) { i0 := dst.PixOffset(r.Min.X, r.Min.Y) i1 := i0 + r.Dx()*4 mi0 := mask.PixOffset(mp.X, mp.Y) sr, sg, sb, sa := src.RGBA() for y, my := r.Min.Y, mp.Y; y != r.Max.Y; y, my = y+1, my+1 { for i, mi := i0, mi0; i < i1; i, mi = i+4, mi+1 { ma := uint32(mask.Pix[mi]) if ma == 0 { continue } ma |= ma << 8 // The 0x101 is here for the same reason as in drawRGBA. a := (m - (sa * ma / m)) * 0x101 d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 d[0] = uint8((uint32(d[0])*a + sr*ma) / m >> 8) d[1] = uint8((uint32(d[1])*a + sg*ma) / m >> 8) d[2] = uint8((uint32(d[2])*a + sb*ma) / m >> 8) d[3] = uint8((uint32(d[3])*a + sa*ma) / m >> 8) } i0 += dst.Stride i1 += dst.Stride mi0 += mask.Stride } } func drawGrayMaskOver(dst *image.RGBA, r image.Rectangle, src *image.Gray, sp image.Point, mask *image.Alpha, mp image.Point) { x0, x1, dx := r.Min.X, r.Max.X, 1 y0, y1, dy := r.Min.Y, r.Max.Y, 1 if r.Overlaps(r.Add(sp.Sub(r.Min))) { if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { x0, x1, dx = x1-1, x0-1, -1 y0, y1, dy = y1-1, y0-1, -1 } } sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y sx0 := sp.X + x0 - r.Min.X mx0 := mp.X + x0 - r.Min.X sx1 := sx0 + (x1 - x0) i0 := dst.PixOffset(x0, y0) di := dx * 4 for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { mi := mask.PixOffset(mx, my) ma := uint32(mask.Pix[mi]) ma |= ma << 8 si := src.PixOffset(sx, sy) sy := uint32(src.Pix[si]) sy |= sy << 8 sa := uint32(0xffff) d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. 
// We work in 16-bit color, and so would normally do: // dr |= dr << 8 // and similarly for dg, db and da, but instead we multiply a // (which is a 16-bit color, ranging in [0,65535]) by 0x101. // This yields the same result, but is fewer arithmetic operations. a := (m - (sa * ma / m)) * 0x101 d[0] = uint8((dr*a + sy*ma) / m >> 8) d[1] = uint8((dg*a + sy*ma) / m >> 8) d[2] = uint8((db*a + sy*ma) / m >> 8) d[3] = uint8((da*a + sa*ma) / m >> 8) } i0 += dy * dst.Stride } } func drawRGBAMaskOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point, mask *image.Alpha, mp image.Point) { x0, x1, dx := r.Min.X, r.Max.X, 1 y0, y1, dy := r.Min.Y, r.Max.Y, 1 if dst == src && r.Overlaps(r.Add(sp.Sub(r.Min))) { if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { x0, x1, dx = x1-1, x0-1, -1 y0, y1, dy = y1-1, y0-1, -1 } } sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y sx0 := sp.X + x0 - r.Min.X mx0 := mp.X + x0 - r.Min.X sx1 := sx0 + (x1 - x0) i0 := dst.PixOffset(x0, y0) di := dx * 4 for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { mi := mask.PixOffset(mx, my) ma := uint32(mask.Pix[mi]) ma |= ma << 8 si := src.PixOffset(sx, sy) sr := uint32(src.Pix[si+0]) sg := uint32(src.Pix[si+1]) sb := uint32(src.Pix[si+2]) sa := uint32(src.Pix[si+3]) sr |= sr << 8 sg |= sg << 8 sb |= sb << 8 sa |= sa << 8 d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. // We work in 16-bit color, and so would normally do: // dr |= dr << 8 // and similarly for dg, db and da, but instead we multiply a // (which is a 16-bit color, ranging in [0,65535]) by 0x101. // This yields the same result, but is fewer arithmetic operations. a := (m - (sa * ma / m)) * 0x101 d[0] = uint8((dr*a + sr*ma) / m >> 8) d[1] = uint8((dg*a + sg*ma) / m >> 8) d[2] = uint8((db*a + sb*ma) / m >> 8) d[3] = uint8((da*a + sa*ma) / m >> 8) } i0 += dy * dst.Stride } } func drawRGBA64ImageMaskOver(dst *image.RGBA, r image.Rectangle, src image.RGBA64Image, sp image.Point, mask *image.Alpha, mp image.Point) { x0, x1, dx := r.Min.X, r.Max.X, 1 y0, y1, dy := r.Min.Y, r.Max.Y, 1 if image.Image(dst) == src && r.Overlaps(r.Add(sp.Sub(r.Min))) { if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { x0, x1, dx = x1-1, x0-1, -1 y0, y1, dy = y1-1, y0-1, -1 } } sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y sx0 := sp.X + x0 - r.Min.X mx0 := mp.X + x0 - r.Min.X sx1 := sx0 + (x1 - x0) i0 := dst.PixOffset(x0, y0) di := dx * 4 for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { mi := mask.PixOffset(mx, my) ma := uint32(mask.Pix[mi]) ma |= ma << 8 srgba := src.RGBA64At(sx, sy) d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. // We work in 16-bit color, and so would normally do: // dr |= dr << 8 // and similarly for dg, db and da, but instead we multiply a // (which is a 16-bit color, ranging in [0,65535]) by 0x101. // This yields the same result, but is fewer arithmetic operations. 
a := (m - (uint32(srgba.A) * ma / m)) * 0x101 d[0] = uint8((dr*a + uint32(srgba.R)*ma) / m >> 8) d[1] = uint8((dg*a + uint32(srgba.G)*ma) / m >> 8) d[2] = uint8((db*a + uint32(srgba.B)*ma) / m >> 8) d[3] = uint8((da*a + uint32(srgba.A)*ma) / m >> 8) } i0 += dy * dst.Stride } } func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { x0, x1, dx := r.Min.X, r.Max.X, 1 y0, y1, dy := r.Min.Y, r.Max.Y, 1 if image.Image(dst) == src && r.Overlaps(r.Add(sp.Sub(r.Min))) { if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { x0, x1, dx = x1-1, x0-1, -1 y0, y1, dy = y1-1, y0-1, -1 } } sy := sp.Y + y0 - r.Min.Y my := mp.Y + y0 - r.Min.Y sx0 := sp.X + x0 - r.Min.X mx0 := mp.X + x0 - r.Min.X sx1 := sx0 + (x1 - x0) i0 := dst.PixOffset(x0, y0) di := dx * 4 // Try the image.RGBA64Image interface, part of the standard library since // Go 1.17. // // This optimization is similar to how FALLBACK1.17 optimizes FALLBACK1.0 // in DrawMask, except here the concrete type of dst is known to be // *image.RGBA. if src0, _ := src.(image.RGBA64Image); src0 != nil { if mask == nil { if op == Over { for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { srgba := src0.RGBA64At(sx, sy) d := dst.Pix[i : i+4 : i+4] dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) a := (m - uint32(srgba.A)) * 0x101 d[0] = uint8((dr*a/m + uint32(srgba.R)) >> 8) d[1] = uint8((dg*a/m + uint32(srgba.G)) >> 8) d[2] = uint8((db*a/m + uint32(srgba.B)) >> 8) d[3] = uint8((da*a/m + uint32(srgba.A)) >> 8) } i0 += dy * dst.Stride } } else { for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { srgba := src0.RGBA64At(sx, sy) d := dst.Pix[i : i+4 : i+4] d[0] = uint8(srgba.R >> 8) d[1] = uint8(srgba.G >> 8) d[2] = uint8(srgba.B >> 8) d[3] = uint8(srgba.A >> 8) } i0 += dy * dst.Stride } } return } else if mask0, _ := mask.(image.RGBA64Image); mask0 != nil { if op == Over { for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { ma := uint32(mask0.RGBA64At(mx, my).A) srgba := src0.RGBA64At(sx, sy) d := dst.Pix[i : i+4 : i+4] dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) a := (m - (uint32(srgba.A) * ma / m)) * 0x101 d[0] = uint8((dr*a + uint32(srgba.R)*ma) / m >> 8) d[1] = uint8((dg*a + uint32(srgba.G)*ma) / m >> 8) d[2] = uint8((db*a + uint32(srgba.B)*ma) / m >> 8) d[3] = uint8((da*a + uint32(srgba.A)*ma) / m >> 8) } i0 += dy * dst.Stride } } else { for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { ma := uint32(mask0.RGBA64At(mx, my).A) srgba := src0.RGBA64At(sx, sy) d := dst.Pix[i : i+4 : i+4] d[0] = uint8(uint32(srgba.R) * ma / m >> 8) d[1] = uint8(uint32(srgba.G) * ma / m >> 8) d[2] = uint8(uint32(srgba.B) * ma / m >> 8) d[3] = uint8(uint32(srgba.A) * ma / m >> 8) } i0 += dy * dst.Stride } } return } } // Use the image.Image interface, part of the standard library since Go // 1.0. // // This is similar to FALLBACK1.0 in DrawMask, except here the concrete // type of dst is known to be *image.RGBA. 
for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { ma := uint32(m) if mask != nil { _, _, _, ma = mask.At(mx, my).RGBA() } sr, sg, sb, sa := src.At(sx, sy).RGBA() d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 if op == Over { dr := uint32(d[0]) dg := uint32(d[1]) db := uint32(d[2]) da := uint32(d[3]) // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. // We work in 16-bit color, and so would normally do: // dr |= dr << 8 // and similarly for dg, db and da, but instead we multiply a // (which is a 16-bit color, ranging in [0,65535]) by 0x101. // This yields the same result, but is fewer arithmetic operations. a := (m - (sa * ma / m)) * 0x101 d[0] = uint8((dr*a + sr*ma) / m >> 8) d[1] = uint8((dg*a + sg*ma) / m >> 8) d[2] = uint8((db*a + sb*ma) / m >> 8) d[3] = uint8((da*a + sa*ma) / m >> 8) } else { d[0] = uint8(sr * ma / m >> 8) d[1] = uint8(sg * ma / m >> 8) d[2] = uint8(sb * ma / m >> 8) d[3] = uint8(sa * ma / m >> 8) } } i0 += dy * dst.Stride } } // clamp clamps i to the interval [0, 0xffff]. func clamp(i int32) int32 { if i < 0 { return 0 } if i > 0xffff { return 0xffff } return i } // sqDiff returns the squared-difference of x and y, shifted by 2 so that // adding four of those won't overflow a uint32. // // x and y are both assumed to be in the range [0, 0xffff]. func sqDiff(x, y int32) uint32 { // This is an optimized code relying on the overflow/wrap around // properties of unsigned integers operations guaranteed by the language // spec. See sqDiff from the image/color package for more details. d := uint32(x - y) return (d * d) >> 2 } func drawPaletted(dst Image, r image.Rectangle, src image.Image, sp image.Point, floydSteinberg bool) { // TODO(nigeltao): handle the case where the dst and src overlap. // Does it even make sense to try and do Floyd-Steinberg whilst // walking the image backward (right-to-left bottom-to-top)? // If dst is an *image.Paletted, we have a fast path for dst.Set and // dst.At. The dst.Set equivalent is a batch version of the algorithm // used by color.Palette's Index method in image/color/color.go, plus // optional Floyd-Steinberg error diffusion. palette, pix, stride := [][4]int32(nil), []byte(nil), 0 if p, ok := dst.(*image.Paletted); ok { palette = make([][4]int32, len(p.Palette)) for i, col := range p.Palette { r, g, b, a := col.RGBA() palette[i][0] = int32(r) palette[i][1] = int32(g) palette[i][2] = int32(b) palette[i][3] = int32(a) } pix, stride = p.Pix[p.PixOffset(r.Min.X, r.Min.Y):], p.Stride } // quantErrorCurr and quantErrorNext are the Floyd-Steinberg quantization // errors that have been propagated to the pixels in the current and next // rows. The +2 simplifies calculation near the edges. var quantErrorCurr, quantErrorNext [][4]int32 if floydSteinberg { quantErrorCurr = make([][4]int32, r.Dx()+2) quantErrorNext = make([][4]int32, r.Dx()+2) } pxRGBA := func(x, y int) (r, g, b, a uint32) { return src.At(x, y).RGBA() } // Fast paths for special cases to avoid excessive use of the color.Color // interface which escapes to the heap but need to be discovered for // each pixel on r. See also https://golang.org/issues/15759. 
switch src0 := src.(type) { case *image.RGBA: pxRGBA = func(x, y int) (r, g, b, a uint32) { return src0.RGBAAt(x, y).RGBA() } case *image.NRGBA: pxRGBA = func(x, y int) (r, g, b, a uint32) { return src0.NRGBAAt(x, y).RGBA() } case *image.YCbCr: pxRGBA = func(x, y int) (r, g, b, a uint32) { return src0.YCbCrAt(x, y).RGBA() } } // Loop over each source pixel. out := color.RGBA64{A: 0xffff} for y := 0; y != r.Dy(); y++ { for x := 0; x != r.Dx(); x++ { // er, eg and eb are the pixel's R,G,B values plus the // optional Floyd-Steinberg error. sr, sg, sb, sa := pxRGBA(sp.X+x, sp.Y+y) er, eg, eb, ea := int32(sr), int32(sg), int32(sb), int32(sa) if floydSteinberg { er = clamp(er + quantErrorCurr[x+1][0]/16) eg = clamp(eg + quantErrorCurr[x+1][1]/16) eb = clamp(eb + quantErrorCurr[x+1][2]/16) ea = clamp(ea + quantErrorCurr[x+1][3]/16) } if palette != nil { // Find the closest palette color in Euclidean R,G,B,A space: // the one that minimizes sum-squared-difference. // TODO(nigeltao): consider smarter algorithms. bestIndex, bestSum := 0, uint32(1<<32-1) for index, p := range palette { sum := sqDiff(er, p[0]) + sqDiff(eg, p[1]) + sqDiff(eb, p[2]) + sqDiff(ea, p[3]) if sum < bestSum { bestIndex, bestSum = index, sum if sum == 0 { break } } } pix[y*stride+x] = byte(bestIndex) if !floydSteinberg { continue } er -= palette[bestIndex][0] eg -= palette[bestIndex][1] eb -= palette[bestIndex][2] ea -= palette[bestIndex][3] } else { out.R = uint16(er) out.G = uint16(eg) out.B = uint16(eb) out.A = uint16(ea) // The third argument is &out instead of out (and out is // declared outside of the inner loop) to avoid the implicit // conversion to color.Color here allocating memory in the // inner loop if sizeof(color.RGBA64) > sizeof(uintptr). dst.Set(r.Min.X+x, r.Min.Y+y, &out) if !floydSteinberg { continue } sr, sg, sb, sa = dst.At(r.Min.X+x, r.Min.Y+y).RGBA() er -= int32(sr) eg -= int32(sg) eb -= int32(sb) ea -= int32(sa) } // Propagate the Floyd-Steinberg quantization error. quantErrorNext[x+0][0] += er * 3 quantErrorNext[x+0][1] += eg * 3 quantErrorNext[x+0][2] += eb * 3 quantErrorNext[x+0][3] += ea * 3 quantErrorNext[x+1][0] += er * 5 quantErrorNext[x+1][1] += eg * 5 quantErrorNext[x+1][2] += eb * 5 quantErrorNext[x+1][3] += ea * 5 quantErrorNext[x+2][0] += er * 1 quantErrorNext[x+2][1] += eg * 1 quantErrorNext[x+2][2] += eb * 1 quantErrorNext[x+2][3] += ea * 1 quantErrorCurr[x+2][0] += er * 7 quantErrorCurr[x+2][1] += eg * 7 quantErrorCurr[x+2][2] += eb * 7 quantErrorCurr[x+2][3] += ea * 7 } // Recycle the quantization error buffers. if floydSteinberg { quantErrorCurr, quantErrorNext = quantErrorNext, quantErrorCurr clear(quantErrorNext) } } }
go/src/image/draw/draw.go/0
{ "file_path": "go/src/image/draw/draw.go", "repo_id": "go", "token_count": 17102 }
266
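The image/draw code above dispatches to specialized fast paths (drawFillOver, drawGlyphOver, drawCopySrc and friends) based on the concrete destination, source and mask types. As a rough usage sketch — not part of draw.go itself — the standalone program below shows how those paths are typically reached through the public draw.Draw and draw.DrawMask entry points; the pixel value noted in the final comment is approximate.

package main

import (
	"fmt"
	"image"
	"image/color"
	"image/draw"
)

func main() {
	dst := image.NewRGBA(image.Rect(0, 0, 4, 4))

	// Fill with opaque white. With a *image.Uniform source and a nil mask,
	// DrawMask can dispatch to the drawFill* style fast paths.
	draw.Draw(dst, dst.Bounds(), image.NewUniform(color.White), image.Point{}, draw.Src)

	// Composite a red source through an alpha mask, as a font rasterizer
	// would when painting a glyph; this corresponds to the glyph/mask paths.
	mask := image.NewAlpha(image.Rect(0, 0, 4, 4))
	mask.SetAlpha(1, 1, color.Alpha{A: 0x80})
	red := image.NewUniform(color.RGBA{R: 0xff, A: 0xff})
	draw.DrawMask(dst, dst.Bounds(), red, image.Point{}, mask, image.Point{}, draw.Over)

	fmt.Println(dst.RGBAAt(1, 1)) // roughly a 50% blend of red over white
}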
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package jpeg import ( "fmt" "math" "math/rand" "strings" "testing" ) func benchmarkDCT(b *testing.B, f func(*block)) { b.StopTimer() blocks := make([]block, 0, b.N*len(testBlocks)) for i := 0; i < b.N; i++ { blocks = append(blocks, testBlocks[:]...) } b.StartTimer() for i := range blocks { f(&blocks[i]) } } func BenchmarkFDCT(b *testing.B) { benchmarkDCT(b, fdct) } func BenchmarkIDCT(b *testing.B) { benchmarkDCT(b, idct) } func TestDCT(t *testing.T) { blocks := make([]block, len(testBlocks)) copy(blocks, testBlocks[:]) // Append some randomly generated blocks of varying sparseness. r := rand.New(rand.NewSource(123)) for i := 0; i < 100; i++ { b := block{} n := r.Int() % 64 for j := 0; j < n; j++ { b[r.Int()%len(b)] = r.Int31() % 256 } blocks = append(blocks, b) } // Check that the FDCT and IDCT functions are inverses, after a scale and // level shift. Scaling reduces the rounding errors in the conversion from // floats to ints. for i, b := range blocks { got, want := b, b for j := range got { got[j] = (got[j] - 128) * 8 } slowFDCT(&got) slowIDCT(&got) for j := range got { got[j] = got[j]/8 + 128 } if differ(&got, &want) { t.Errorf("i=%d: IDCT(FDCT)\nsrc\n%s\ngot\n%s\nwant\n%s\n", i, &b, &got, &want) } } // Check that the optimized and slow FDCT implementations agree. // The fdct function already does a scale and level shift. for i, b := range blocks { got, want := b, b fdct(&got) for j := range want { want[j] = (want[j] - 128) * 8 } slowFDCT(&want) if differ(&got, &want) { t.Errorf("i=%d: FDCT\nsrc\n%s\ngot\n%s\nwant\n%s\n", i, &b, &got, &want) } } // Check that the optimized and slow IDCT implementations agree. for i, b := range blocks { got, want := b, b idct(&got) slowIDCT(&want) if differ(&got, &want) { t.Errorf("i=%d: IDCT\nsrc\n%s\ngot\n%s\nwant\n%s\n", i, &b, &got, &want) } } } // differ reports whether any pair-wise elements in b0 and b1 differ by 2 or // more. That tolerance is because there isn't a single definitive decoding of // a given JPEG image, even before the YCbCr to RGB conversion; implementations // can have different IDCT rounding errors. func differ(b0, b1 *block) bool { for i := range b0 { delta := b0[i] - b1[i] if delta < -2 || +2 < delta { return true } } return false } // alpha returns 1 if i is 0 and returns √2 otherwise. func alpha(i int) float64 { if i == 0 { return 1 } return math.Sqrt2 } var cosines [32]float64 // cosines[k] = cos(π/2 * k/8) func init() { for k := range cosines { cosines[k] = math.Cos(math.Pi * float64(k) / 16) } } // slowFDCT performs the 8*8 2-dimensional forward discrete cosine transform: // // dst[u,v] = (1/8) * Σ_x Σ_y alpha(u) * alpha(v) * src[x,y] * // cos((π/2) * (2*x + 1) * u / 8) * // cos((π/2) * (2*y + 1) * v / 8) // // x and y are in pixel space, and u and v are in transform space. // // b acts as both dst and src. func slowFDCT(b *block) { var dst [blockSize]float64 for v := 0; v < 8; v++ { for u := 0; u < 8; u++ { sum := 0.0 for y := 0; y < 8; y++ { for x := 0; x < 8; x++ { sum += alpha(u) * alpha(v) * float64(b[8*y+x]) * cosines[((2*x+1)*u)%32] * cosines[((2*y+1)*v)%32] } } dst[8*v+u] = sum / 8 } } // Convert from float64 to int32. 
for i := range dst { b[i] = int32(dst[i] + 0.5) } } // slowIDCT performs the 8*8 2-dimensional inverse discrete cosine transform: // // dst[x,y] = (1/8) * Σ_u Σ_v alpha(u) * alpha(v) * src[u,v] * // cos((π/2) * (2*x + 1) * u / 8) * // cos((π/2) * (2*y + 1) * v / 8) // // x and y are in pixel space, and u and v are in transform space. // // b acts as both dst and src. func slowIDCT(b *block) { var dst [blockSize]float64 for y := 0; y < 8; y++ { for x := 0; x < 8; x++ { sum := 0.0 for v := 0; v < 8; v++ { for u := 0; u < 8; u++ { sum += alpha(u) * alpha(v) * float64(b[8*v+u]) * cosines[((2*x+1)*u)%32] * cosines[((2*y+1)*v)%32] } } dst[8*y+x] = sum / 8 } } // Convert from float64 to int32. for i := range dst { b[i] = int32(dst[i] + 0.5) } } func (b *block) String() string { s := &strings.Builder{} fmt.Fprintf(s, "{\n") for y := 0; y < 8; y++ { fmt.Fprintf(s, "\t") for x := 0; x < 8; x++ { fmt.Fprintf(s, "0x%04x, ", uint16(b[8*y+x])) } fmt.Fprintln(s) } fmt.Fprintf(s, "}") return s.String() } // testBlocks are the first 10 pre-IDCT blocks from ../testdata/video-001.jpeg. var testBlocks = [10]block{ { 0x7f, 0xf6, 0x01, 0x07, 0xff, 0x00, 0x00, 0x00, 0xf5, 0x01, 0xfa, 0x01, 0xfe, 0x00, 0x01, 0x00, 0x05, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xf8, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0xff, 0xff, 0x00, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0xff, 0x01, 0x00, 0xfe, }, { 0x29, 0x07, 0x00, 0xfc, 0x01, 0x01, 0x00, 0x00, 0x07, 0x00, 0x03, 0x00, 0x01, 0x00, 0xff, 0xff, 0xff, 0xfd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0xff, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0xfa, 0x01, 0x00, 0x01, 0x00, 0x01, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x02, }, { 0xc5, 0xfa, 0x01, 0x00, 0x00, 0x01, 0x00, 0xff, 0x02, 0xff, 0x01, 0x00, 0x01, 0x00, 0xff, 0x00, 0xff, 0xff, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0xff, 0x00, 0x01, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, { 0x86, 0x05, 0x00, 0x02, 0x00, 0x00, 0x01, 0x00, 0xf2, 0x06, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0xf6, 0xfa, 0xf9, 0x00, 0xff, 0x01, 0x00, 0x00, 0xf9, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x01, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0xff, 0x01, 0x00, 0xff, 0x00, 0x00, }, { 0x24, 0xfe, 0x00, 0xff, 0x00, 0xff, 0xff, 0x00, 0x08, 0xfd, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x06, 0x03, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x01, 0xff, 0x00, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0xff, 0x01, }, { 0xcd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xff, }, { 0x81, 0xfe, 0x05, 0xff, 0x01, 0xff, 0x01, 0x00, 0xef, 0xf9, 0x00, 0xf9, 0x00, 0xff, 0x00, 0xff, 0x05, 
0xf9, 0x00, 0xf8, 0x01, 0xff, 0x01, 0xff, 0x00, 0xff, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x01, 0xff, 0x01, 0x01, 0x00, 0xff, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, }, { 0x28, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x02, 0x01, 0x03, 0x00, 0xff, 0x00, 0x01, 0xfe, 0x02, 0x01, 0x03, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0xfd, 0x00, 0x01, 0x00, 0xff, 0x00, 0x01, 0xff, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x01, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0x00, 0x01, }, { 0xdf, 0xf9, 0xfe, 0x00, 0x03, 0x01, 0xff, 0xff, 0x04, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x01, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, }, { 0x88, 0xfd, 0x00, 0x00, 0xff, 0x00, 0x01, 0xff, 0xe1, 0x06, 0x06, 0x01, 0xff, 0x00, 0x01, 0x00, 0x08, 0x00, 0xfa, 0x00, 0xff, 0xff, 0xff, 0xff, 0x08, 0x01, 0x00, 0xff, 0x01, 0xff, 0x00, 0x00, 0xf5, 0xff, 0x00, 0x01, 0xff, 0x01, 0x01, 0x00, 0xff, 0xff, 0x01, 0xff, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x01, 0xff, 0x00, 0xff, 0x00, 0x01, 0x02, 0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0x00, }, }
go/src/image/jpeg/dct_test.go/0
{ "file_path": "go/src/image/jpeg/dct_test.go", "repo_id": "go", "token_count": 4910 }
267
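slowFDCT and slowIDCT above are the reference implementations that the optimized fdct/idct are checked against. The standalone sketch below restates the same forward-DCT formula directly in float64, with no level shift and no int32 rounding, purely as an illustration; it is not the package's optimized transform. For a flat 8×8 block only the DC coefficient survives.

package main

import (
	"fmt"
	"math"
)

// alpha is the DCT normalization term: 1 for the DC term, √2 otherwise.
func alpha(i int) float64 {
	if i == 0 {
		return 1
	}
	return math.Sqrt2
}

// fdct8x8 computes
//
//	dst[u,v] = (1/8) * Σ_x Σ_y alpha(u)*alpha(v)*src[x,y] *
//	           cos((2x+1)uπ/16) * cos((2y+1)vπ/16)
//
// which is the formula documented on slowFDCT, kept entirely in float64.
func fdct8x8(src *[64]float64) [64]float64 {
	var dst [64]float64
	for v := 0; v < 8; v++ {
		for u := 0; u < 8; u++ {
			sum := 0.0
			for y := 0; y < 8; y++ {
				for x := 0; x < 8; x++ {
					sum += alpha(u) * alpha(v) * src[8*y+x] *
						math.Cos(float64(2*x+1)*float64(u)*math.Pi/16) *
						math.Cos(float64(2*y+1)*float64(v)*math.Pi/16)
				}
			}
			dst[8*v+u] = sum / 8
		}
	}
	return dst
}

func main() {
	// A flat block (every sample 128): only the DC coefficient is non-zero.
	var b [64]float64
	for i := range b {
		b[i] = 128
	}
	out := fdct8x8(&b)
	fmt.Printf("DC = %.1f, first AC = %.6f\n", out[0], out[1]) // DC = 1024.0
}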
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package png import ( "bufio" "bytes" "fmt" "image" "image/color" "io" "os" "reflect" "strings" "testing" ) var filenames = []string{ "basn0g01", "basn0g01-30", "basn0g02", "basn0g02-29", "basn0g04", "basn0g04-31", "basn0g08", "basn0g16", "basn2c08", "basn2c16", "basn3p01", "basn3p02", "basn3p04", "basn3p04-31i", "basn3p08", "basn3p08-trns", "basn4a08", "basn4a16", "basn6a08", "basn6a16", "ftbbn0g01", "ftbbn0g02", "ftbbn0g04", "ftbbn2c16", "ftbbn3p08", "ftbgn2c16", "ftbgn3p08", "ftbrn2c08", "ftbwn0g16", "ftbwn3p08", "ftbyn3p08", "ftp0n0g08", "ftp0n2c08", "ftp0n3p08", "ftp1n3p08", } var filenamesPaletted = []string{ "basn3p01", "basn3p02", "basn3p04", "basn3p08", "basn3p08-trns", } var filenamesShort = []string{ "basn0g01", "basn0g04-31", "basn6a16", } func readPNG(filename string) (image.Image, error) { f, err := os.Open(filename) if err != nil { return nil, err } defer f.Close() return Decode(f) } // fakebKGDs maps from filenames to fake bKGD chunks for our approximation to // the sng command-line tool. Package png doesn't keep that metadata when // png.Decode returns an image.Image. var fakebKGDs = map[string]string{ "ftbbn0g01": "bKGD {gray: 0;}\n", "ftbbn0g02": "bKGD {gray: 0;}\n", "ftbbn0g04": "bKGD {gray: 0;}\n", "ftbbn2c16": "bKGD {red: 0; green: 0; blue: 65535;}\n", "ftbbn3p08": "bKGD {index: 245}\n", "ftbgn2c16": "bKGD {red: 0; green: 65535; blue: 0;}\n", "ftbgn3p08": "bKGD {index: 245}\n", "ftbrn2c08": "bKGD {red: 255; green: 0; blue: 0;}\n", "ftbwn0g16": "bKGD {gray: 65535;}\n", "ftbwn3p08": "bKGD {index: 0}\n", "ftbyn3p08": "bKGD {index: 245}\n", } // fakegAMAs maps from filenames to fake gAMA chunks for our approximation to // the sng command-line tool. Package png doesn't keep that metadata when // png.Decode returns an image.Image. var fakegAMAs = map[string]string{ "ftbbn0g01": "", "ftbbn0g02": "gAMA {0.45455}\n", } // fakeIHDRUsings maps from filenames to fake IHDR "using" lines for our // approximation to the sng command-line tool. The PNG model is that // transparency (in the tRNS chunk) is separate to the color/grayscale/palette // color model (in the IHDR chunk). The Go model is that the concrete // image.Image type returned by png.Decode, such as image.RGBA (with all pixels // having 100% alpha) or image.NRGBA, encapsulates whether or not the image has // transparency. This map is a hack to work around the fact that the Go model // can't otherwise discriminate PNG's "IHDR says color (with no alpha) but tRNS // says alpha" and "IHDR says color with alpha". var fakeIHDRUsings = map[string]string{ "ftbbn0g01": " using grayscale;\n", "ftbbn0g02": " using grayscale;\n", "ftbbn0g04": " using grayscale;\n", "ftbbn2c16": " using color;\n", "ftbgn2c16": " using color;\n", "ftbrn2c08": " using color;\n", "ftbwn0g16": " using grayscale;\n", } // An approximation of the sng command-line tool. func sng(w io.WriteCloser, filename string, png image.Image) { defer w.Close() bounds := png.Bounds() cm := png.ColorModel() var bitdepth int switch cm { case color.RGBAModel, color.NRGBAModel, color.AlphaModel, color.GrayModel: bitdepth = 8 default: bitdepth = 16 } cpm, _ := cm.(color.Palette) var paletted *image.Paletted if cpm != nil { switch { case len(cpm) <= 2: bitdepth = 1 case len(cpm) <= 4: bitdepth = 2 case len(cpm) <= 16: bitdepth = 4 default: bitdepth = 8 } paletted = png.(*image.Paletted) } // Write the filename and IHDR. 
io.WriteString(w, "#SNG: from "+filename+".png\nIHDR {\n") fmt.Fprintf(w, " width: %d; height: %d; bitdepth: %d;\n", bounds.Dx(), bounds.Dy(), bitdepth) if s, ok := fakeIHDRUsings[filename]; ok { io.WriteString(w, s) } else { switch { case cm == color.RGBAModel, cm == color.RGBA64Model: io.WriteString(w, " using color;\n") case cm == color.NRGBAModel, cm == color.NRGBA64Model: io.WriteString(w, " using color alpha;\n") case cm == color.GrayModel, cm == color.Gray16Model: io.WriteString(w, " using grayscale;\n") case cpm != nil: io.WriteString(w, " using color palette;\n") default: io.WriteString(w, "unknown PNG decoder color model\n") } } io.WriteString(w, "}\n") // We fake a gAMA chunk. The test files have a gAMA chunk but the go PNG // parser ignores it (the PNG spec section 11.3 says "Ancillary chunks may // be ignored by a decoder"). if s, ok := fakegAMAs[filename]; ok { io.WriteString(w, s) } else { io.WriteString(w, "gAMA {1.0000}\n") } // Write the PLTE and tRNS (if applicable). useTransparent := false if cpm != nil { lastAlpha := -1 io.WriteString(w, "PLTE {\n") for i, c := range cpm { var r, g, b, a uint8 switch c := c.(type) { case color.RGBA: r, g, b, a = c.R, c.G, c.B, 0xff case color.NRGBA: r, g, b, a = c.R, c.G, c.B, c.A default: panic("unknown palette color type") } if a != 0xff { lastAlpha = i } fmt.Fprintf(w, " (%3d,%3d,%3d) # rgb = (0x%02x,0x%02x,0x%02x)\n", r, g, b, r, g, b) } io.WriteString(w, "}\n") if s, ok := fakebKGDs[filename]; ok { io.WriteString(w, s) } if lastAlpha != -1 { io.WriteString(w, "tRNS {\n") for i := 0; i <= lastAlpha; i++ { _, _, _, a := cpm[i].RGBA() a >>= 8 fmt.Fprintf(w, " %d", a) } io.WriteString(w, "}\n") } } else if strings.HasPrefix(filename, "ft") { if s, ok := fakebKGDs[filename]; ok { io.WriteString(w, s) } // We fake a tRNS chunk. The test files' grayscale and truecolor // transparent images all have their top left corner transparent. switch c := png.At(0, 0).(type) { case color.NRGBA: if c.A == 0 { useTransparent = true io.WriteString(w, "tRNS {\n") switch filename { case "ftbbn0g01", "ftbbn0g02", "ftbbn0g04": // The standard image package doesn't have a "gray with // alpha" type. Instead, we use an image.NRGBA. fmt.Fprintf(w, " gray: %d;\n", c.R) default: fmt.Fprintf(w, " red: %d; green: %d; blue: %d;\n", c.R, c.G, c.B) } io.WriteString(w, "}\n") } case color.NRGBA64: if c.A == 0 { useTransparent = true io.WriteString(w, "tRNS {\n") switch filename { case "ftbwn0g16": // The standard image package doesn't have a "gray16 with // alpha" type. Instead, we use an image.NRGBA64. fmt.Fprintf(w, " gray: %d;\n", c.R) default: fmt.Fprintf(w, " red: %d; green: %d; blue: %d;\n", c.R, c.G, c.B) } io.WriteString(w, "}\n") } } } // Write the IMAGE. 
io.WriteString(w, "IMAGE {\n pixels hex\n") for y := bounds.Min.Y; y < bounds.Max.Y; y++ { switch { case cm == color.GrayModel: for x := bounds.Min.X; x < bounds.Max.X; x++ { gray := png.At(x, y).(color.Gray) fmt.Fprintf(w, "%02x", gray.Y) } case cm == color.Gray16Model: for x := bounds.Min.X; x < bounds.Max.X; x++ { gray16 := png.At(x, y).(color.Gray16) fmt.Fprintf(w, "%04x ", gray16.Y) } case cm == color.RGBAModel: for x := bounds.Min.X; x < bounds.Max.X; x++ { rgba := png.At(x, y).(color.RGBA) fmt.Fprintf(w, "%02x%02x%02x ", rgba.R, rgba.G, rgba.B) } case cm == color.RGBA64Model: for x := bounds.Min.X; x < bounds.Max.X; x++ { rgba64 := png.At(x, y).(color.RGBA64) fmt.Fprintf(w, "%04x%04x%04x ", rgba64.R, rgba64.G, rgba64.B) } case cm == color.NRGBAModel: for x := bounds.Min.X; x < bounds.Max.X; x++ { nrgba := png.At(x, y).(color.NRGBA) switch filename { case "ftbbn0g01", "ftbbn0g02", "ftbbn0g04": fmt.Fprintf(w, "%02x", nrgba.R) default: if useTransparent { fmt.Fprintf(w, "%02x%02x%02x ", nrgba.R, nrgba.G, nrgba.B) } else { fmt.Fprintf(w, "%02x%02x%02x%02x ", nrgba.R, nrgba.G, nrgba.B, nrgba.A) } } } case cm == color.NRGBA64Model: for x := bounds.Min.X; x < bounds.Max.X; x++ { nrgba64 := png.At(x, y).(color.NRGBA64) switch filename { case "ftbwn0g16": fmt.Fprintf(w, "%04x ", nrgba64.R) default: if useTransparent { fmt.Fprintf(w, "%04x%04x%04x ", nrgba64.R, nrgba64.G, nrgba64.B) } else { fmt.Fprintf(w, "%04x%04x%04x%04x ", nrgba64.R, nrgba64.G, nrgba64.B, nrgba64.A) } } } case cpm != nil: var b, c int for x := bounds.Min.X; x < bounds.Max.X; x++ { b = b<<uint(bitdepth) | int(paletted.ColorIndexAt(x, y)) c++ if c == 8/bitdepth { fmt.Fprintf(w, "%02x", b) b = 0 c = 0 } } if c != 0 { for c != 8/bitdepth { b = b << uint(bitdepth) c++ } fmt.Fprintf(w, "%02x", b) } } io.WriteString(w, "\n") } io.WriteString(w, "}\n") } func TestReader(t *testing.T) { names := filenames if testing.Short() { names = filenamesShort } for _, fn := range names { // Read the .png file. img, err := readPNG("testdata/pngsuite/" + fn + ".png") if err != nil { t.Error(fn, err) continue } if fn == "basn4a16" { // basn4a16.sng is gray + alpha but sng() will produce true color + alpha // so we just check a single random pixel. c := img.At(2, 1).(color.NRGBA64) if c.R != 0x11a7 || c.G != 0x11a7 || c.B != 0x11a7 || c.A != 0x1085 { t.Error(fn, fmt.Errorf("wrong pixel value at (2, 1): %x", c)) } continue } piper, pipew := io.Pipe() pb := bufio.NewScanner(piper) go sng(pipew, fn, img) defer piper.Close() // Read the .sng file. sf, err := os.Open("testdata/pngsuite/" + fn + ".sng") if err != nil { t.Error(fn, err) continue } defer sf.Close() sb := bufio.NewScanner(sf) // Compare the two, in SNG format, line by line. for { pdone := !pb.Scan() sdone := !sb.Scan() if pdone && sdone { break } if pdone || sdone { t.Errorf("%s: Different sizes", fn) break } ps := pb.Text() ss := sb.Text() // Newer versions of the sng command line tool append an optional // color name to the RGB tuple. For example: // # rgb = (0xff,0xff,0xff) grey100 // # rgb = (0x00,0x00,0xff) blue1 // instead of the older version's plainer: // # rgb = (0xff,0xff,0xff) // # rgb = (0x00,0x00,0xff) // We strip any such name. 
if strings.Contains(ss, "# rgb = (") && !strings.HasSuffix(ss, ")") { if i := strings.LastIndex(ss, ") "); i >= 0 { ss = ss[:i+1] } } if ps != ss { t.Errorf("%s: Mismatch\n%s\nversus\n%s\n", fn, ps, ss) break } } if pb.Err() != nil { t.Error(fn, pb.Err()) } if sb.Err() != nil { t.Error(fn, sb.Err()) } } } var readerErrors = []struct { file string err string }{ {"invalid-zlib.png", "zlib: invalid checksum"}, {"invalid-crc32.png", "invalid checksum"}, {"invalid-noend.png", "unexpected EOF"}, {"invalid-trunc.png", "unexpected EOF"}, } func TestReaderError(t *testing.T) { for _, tt := range readerErrors { img, err := readPNG("testdata/" + tt.file) if err == nil { t.Errorf("decoding %s: missing error", tt.file) continue } if !strings.Contains(err.Error(), tt.err) { t.Errorf("decoding %s: %s, want %s", tt.file, err, tt.err) } if img != nil { t.Errorf("decoding %s: have image + error", tt.file) } } } func TestPalettedDecodeConfig(t *testing.T) { for _, fn := range filenamesPaletted { f, err := os.Open("testdata/pngsuite/" + fn + ".png") if err != nil { t.Errorf("%s: open failed: %v", fn, err) continue } defer f.Close() cfg, err := DecodeConfig(f) if err != nil { t.Errorf("%s: %v", fn, err) continue } pal, ok := cfg.ColorModel.(color.Palette) if !ok { t.Errorf("%s: expected paletted color model", fn) continue } if pal == nil { t.Errorf("%s: palette not initialized", fn) continue } } } func TestInterlaced(t *testing.T) { a, err := readPNG("testdata/gray-gradient.png") if err != nil { t.Fatal(err) } b, err := readPNG("testdata/gray-gradient.interlaced.png") if err != nil { t.Fatal(err) } if !reflect.DeepEqual(a, b) { t.Fatalf("decodings differ:\nnon-interlaced:\n%#v\ninterlaced:\n%#v", a, b) } } func TestIncompleteIDATOnRowBoundary(t *testing.T) { // The following is an invalid 1x2 grayscale PNG image. The header is OK, // but the zlib-compressed IDAT payload contains two bytes "\x02\x00", // which is only one row of data (the leading "\x02" is a row filter). const ( ihdr = "\x00\x00\x00\x0dIHDR\x00\x00\x00\x01\x00\x00\x00\x02\x08\x00\x00\x00\x00\xbc\xea\xe9\xfb" idat = "\x00\x00\x00\x0eIDAT\x78\x9c\x62\x62\x00\x04\x00\x00\xff\xff\x00\x06\x00\x03\xfa\xd0\x59\xae" iend = "\x00\x00\x00\x00IEND\xae\x42\x60\x82" ) _, err := Decode(strings.NewReader(pngHeader + ihdr + idat + iend)) if err == nil { t.Fatal("got nil error, want non-nil") } } func TestTrailingIDATChunks(t *testing.T) { // The following is a valid 1x1 PNG image containing color.Gray{255} and // a trailing zero-length IDAT chunk (see PNG specification section 12.9): const ( ihdr = "\x00\x00\x00\x0dIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00\x3a\x7e\x9b\x55" idatWhite = "\x00\x00\x00\x0eIDAT\x78\x9c\x62\xfa\x0f\x08\x00\x00\xff\xff\x01\x05\x01\x02\x5a\xdd\x39\xcd" idatZero = "\x00\x00\x00\x00IDAT\x35\xaf\x06\x1e" iend = "\x00\x00\x00\x00IEND\xae\x42\x60\x82" ) _, err := Decode(strings.NewReader(pngHeader + ihdr + idatWhite + idatZero + iend)) if err != nil { t.Fatalf("decoding valid image: %v", err) } // Non-zero-length trailing IDAT chunks should be ignored (recoverable error). // The following chunk contains a single pixel with color.Gray{0}. 
const idatBlack = "\x00\x00\x00\x0eIDAT\x78\x9c\x62\x62\x00\x04\x00\x00\xff\xff\x00\x06\x00\x03\xfa\xd0\x59\xae" img, err := Decode(strings.NewReader(pngHeader + ihdr + idatWhite + idatBlack + iend)) if err != nil { t.Fatalf("trailing IDAT not ignored: %v", err) } if img.At(0, 0) == (color.Gray{0}) { t.Fatal("decoded image from trailing IDAT chunk") } } func TestMultipletRNSChunks(t *testing.T) { /* The following is a valid 1x1 paletted PNG image with a 1-element palette containing color.NRGBA{0xff, 0x00, 0x00, 0x7f}: 0000000: 8950 4e47 0d0a 1a0a 0000 000d 4948 4452 .PNG........IHDR 0000010: 0000 0001 0000 0001 0803 0000 0028 cb34 .............(.4 0000020: bb00 0000 0350 4c54 45ff 0000 19e2 0937 .....PLTE......7 0000030: 0000 0001 7452 4e53 7f80 5cb4 cb00 0000 ....tRNS..\..... 0000040: 0e49 4441 5478 9c62 6200 0400 00ff ff00 .IDATx.bb....... 0000050: 0600 03fa d059 ae00 0000 0049 454e 44ae .....Y.....IEND. 0000060: 4260 82 B`. Dropping the tRNS chunk makes that color's alpha 0xff instead of 0x7f. */ const ( ihdr = "\x00\x00\x00\x0dIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x03\x00\x00\x00\x28\xcb\x34\xbb" plte = "\x00\x00\x00\x03PLTE\xff\x00\x00\x19\xe2\x09\x37" trns = "\x00\x00\x00\x01tRNS\x7f\x80\x5c\xb4\xcb" idat = "\x00\x00\x00\x0eIDAT\x78\x9c\x62\x62\x00\x04\x00\x00\xff\xff\x00\x06\x00\x03\xfa\xd0\x59\xae" iend = "\x00\x00\x00\x00IEND\xae\x42\x60\x82" ) for i := 0; i < 4; i++ { var b []byte b = append(b, pngHeader...) b = append(b, ihdr...) b = append(b, plte...) for j := 0; j < i; j++ { b = append(b, trns...) } b = append(b, idat...) b = append(b, iend...) var want color.Color m, err := Decode(bytes.NewReader(b)) switch i { case 0: if err != nil { t.Errorf("%d tRNS chunks: %v", i, err) continue } want = color.RGBA{0xff, 0x00, 0x00, 0xff} case 1: if err != nil { t.Errorf("%d tRNS chunks: %v", i, err) continue } want = color.NRGBA{0xff, 0x00, 0x00, 0x7f} default: if err == nil { t.Errorf("%d tRNS chunks: got nil error, want non-nil", i) } continue } if got := m.At(0, 0); got != want { t.Errorf("%d tRNS chunks: got %T %v, want %T %v", i, got, got, want, want) } } } func TestUnknownChunkLengthUnderflow(t *testing.T) { data := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x06, 0xf4, 0x7c, 0x55, 0x04, 0x1a, 0xd3, 0x11, 0x9a, 0x73, 0x00, 0x00, 0xf8, 0x1e, 0xf3, 0x2e, 0x00, 0x00, 0x01, 0x00, 0xff, 0xff, 0xff, 0xff, 0x07, 0xf4, 0x7c, 0x55, 0x04, 0x1a, 0xd3} _, err := Decode(bytes.NewReader(data)) if err == nil { t.Errorf("Didn't fail reading an unknown chunk with length 0xffffffff") } } func TestPaletted8OutOfRangePixel(t *testing.T) { // IDAT contains a reference to a palette index that does not exist in the file. img, err := readPNG("testdata/invalid-palette.png") if err != nil { t.Errorf("decoding invalid-palette.png: unexpected error %v", err) return } // Expect that the palette is extended with opaque black. 
want := color.RGBA{0x00, 0x00, 0x00, 0xff} if got := img.At(15, 15); got != want { t.Errorf("got %F %v, expected %T %v", got, got, want, want) } } func TestGray8Transparent(t *testing.T) { // These bytes come from https://golang.org/issues/19553 m, err := Decode(bytes.NewReader([]byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x85, 0x2c, 0x88, 0x80, 0x00, 0x00, 0x00, 0x02, 0x74, 0x52, 0x4e, 0x53, 0x00, 0xff, 0x5b, 0x91, 0x22, 0xb5, 0x00, 0x00, 0x00, 0x02, 0x62, 0x4b, 0x47, 0x44, 0x00, 0xff, 0x87, 0x8f, 0xcc, 0xbf, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0a, 0xf0, 0x00, 0x00, 0x0a, 0xf0, 0x01, 0x42, 0xac, 0x34, 0x98, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd5, 0x04, 0x02, 0x12, 0x11, 0x11, 0xf7, 0x65, 0x3d, 0x8b, 0x00, 0x00, 0x00, 0x4f, 0x49, 0x44, 0x41, 0x54, 0x08, 0xd7, 0x63, 0xf8, 0xff, 0xff, 0xff, 0xb9, 0xbd, 0x70, 0xf0, 0x8c, 0x01, 0xc8, 0xaf, 0x6e, 0x99, 0x02, 0x05, 0xd9, 0x7b, 0xc1, 0xfc, 0x6b, 0xff, 0xa1, 0xa0, 0x87, 0x30, 0xff, 0xd9, 0xde, 0xbd, 0xd5, 0x4b, 0xf7, 0xee, 0xfd, 0x0e, 0xe3, 0xef, 0xcd, 0x06, 0x19, 0x14, 0xf5, 0x1e, 0xce, 0xef, 0x01, 0x31, 0x92, 0xd7, 0x82, 0x41, 0x31, 0x9c, 0x3f, 0x07, 0x02, 0xee, 0xa1, 0xaa, 0xff, 0xff, 0x9f, 0xe1, 0xd9, 0x56, 0x30, 0xf8, 0x0e, 0xe5, 0x03, 0x00, 0xa9, 0x42, 0x84, 0x3d, 0xdf, 0x8f, 0xa6, 0x8f, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, })) if err != nil { t.Fatalf("Decode: %v", err) } const hex = "0123456789abcdef" var got []byte bounds := m.Bounds() for y := bounds.Min.Y; y < bounds.Max.Y; y++ { for x := bounds.Min.X; x < bounds.Max.X; x++ { if r, _, _, a := m.At(x, y).RGBA(); a != 0 { got = append(got, hex[0x0f&(r>>12)], hex[0x0f&(r>>8)], ' ', ) } else { got = append(got, '.', '.', ' ', ) } } got = append(got, '\n') } const want = "" + ".. .. .. ce bd bd bd bd bd bd bd bd bd bd e6 \n" + ".. .. .. 7b 84 94 94 94 94 94 94 94 94 6b bd \n" + ".. .. .. 7b d6 .. .. .. .. .. .. .. .. 8c bd \n" + ".. .. .. 7b d6 .. .. .. .. .. .. .. .. 8c bd \n" + ".. .. .. 7b d6 .. .. .. .. .. .. .. .. 8c bd \n" + "e6 bd bd 7b a5 bd bd f7 .. .. .. .. .. 8c bd \n" + "bd 6b 94 94 94 94 5a ef .. .. .. .. .. 8c bd \n" + "bd 8c .. .. .. .. 63 ad ad ad ad ad ad 73 bd \n" + "bd 8c .. .. .. .. 63 9c 9c 9c 9c 9c 9c 9c de \n" + "bd 6b 94 94 94 94 5a ef .. .. .. .. .. .. .. \n" + "e6 b5 b5 b5 b5 b5 b5 f7 .. .. .. .. .. .. .. \n" if string(got) != want { t.Errorf("got:\n%swant:\n%s", got, want) } } func TestDimensionOverflow(t *testing.T) { maxInt32AsInt := int((1 << 31) - 1) have32BitInts := 0 > (1 + maxInt32AsInt) testCases := []struct { src []byte unsupportedConfig bool width int height int }{ // These bytes come from https://golang.org/issues/22304 // // It encodes a 2147483646 × 2147483646 (i.e. 0x7ffffffe × 0x7ffffffe) // NRGBA image. The (width × height) per se doesn't overflow an int64, but // (width × height × bytesPerPixel) will. 
{ src: []byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x7f, 0xff, 0xff, 0xfe, 0x7f, 0xff, 0xff, 0xfe, 0x08, 0x06, 0x00, 0x00, 0x00, 0x30, 0x57, 0xb3, 0xfd, 0x00, 0x00, 0x00, 0x15, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x62, 0x62, 0x20, 0x12, 0x8c, 0x2a, 0xa4, 0xb3, 0x42, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x38, 0x00, 0x15, 0x2d, 0xef, 0x5f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, }, // It's debatable whether DecodeConfig (which does not allocate a // pixel buffer, unlike Decode) should fail in this case. The Go // standard library has made its choice, and the standard library // has compatibility constraints. unsupportedConfig: true, width: 0x7ffffffe, height: 0x7ffffffe, }, // The next three cases come from https://golang.org/issues/38435 { src: []byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0xb5, 0x04, 0x00, 0x00, 0xb5, 0x04, 0x08, 0x06, 0x00, 0x00, 0x00, 0xf5, 0x60, 0x2c, 0xb8, 0x00, 0x00, 0x00, 0x15, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x62, 0x62, 0x20, 0x12, 0x8c, 0x2a, 0xa4, 0xb3, 0x42, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x38, 0x00, 0x15, 0x2d, 0xef, 0x5f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, }, // Here, width * height = 0x7ffea810, just under MaxInt32, but at 4 // bytes per pixel, the number of pixels overflows an int32. unsupportedConfig: have32BitInts, width: 0x0000b504, height: 0x0000b504, }, { src: []byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x06, 0x00, 0x00, 0x00, 0x30, 0x6e, 0xc5, 0x21, 0x00, 0x00, 0x00, 0x15, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x62, 0x62, 0x20, 0x12, 0x8c, 0x2a, 0xa4, 0xb3, 0x42, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x38, 0x00, 0x15, 0x2d, 0xef, 0x5f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, }, unsupportedConfig: false, width: 0x04000000, height: 0x00000001, }, { src: []byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x06, 0x00, 0x00, 0x00, 0xaa, 0xd4, 0x7c, 0xda, 0x00, 0x00, 0x00, 0x15, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x62, 0x66, 0x20, 0x12, 0x30, 0x8d, 0x2a, 0xa4, 0xaf, 0x42, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0xd2, 0x00, 0x16, 0x00, 0x00, 0x00, }, unsupportedConfig: false, width: 0x08000000, height: 0x00000001, }, } for i, tc := range testCases { cfg, err := DecodeConfig(bytes.NewReader(tc.src)) if tc.unsupportedConfig { if err == nil { t.Errorf("i=%d: DecodeConfig: got nil error, want non-nil", i) } else if _, ok := err.(UnsupportedError); !ok { t.Fatalf("Decode: got %v (of type %T), want non-nil error (of type png.UnsupportedError)", err, err) } continue } else if err != nil { t.Errorf("i=%d: DecodeConfig: %v", i, err) continue } else if cfg.Width != tc.width { t.Errorf("i=%d: width: got %d, want %d", i, cfg.Width, tc.width) continue } else if cfg.Height != tc.height { t.Errorf("i=%d: height: got %d, want %d", i, cfg.Height, tc.height) continue } if nPixels := int64(cfg.Width) * int64(cfg.Height); nPixels > 0x7f000000 { // In theory, calling Decode would succeed, given several gigabytes // of memory. In practice, trying to make a []uint8 big enough to // hold all of the pixels can often result in OOM (out of memory). 
// OOM is unrecoverable; we can't write a test that passes when OOM // happens. Instead we skip the Decode call (and its tests). continue } else if testing.Short() { // Even for smaller image dimensions, calling Decode might allocate // 1 GiB or more of memory. This is usually feasible, and we want // to check that calling Decode doesn't panic if there's enough // memory, but we provide a runtime switch (testing.Short) to skip // these if it would OOM. See also http://golang.org/issue/5050 // "decoding... images can cause huge memory allocations". continue } // Even if we don't panic, these aren't valid PNG images. if _, err := Decode(bytes.NewReader(tc.src)); err == nil { t.Errorf("i=%d: Decode: got nil error, want non-nil", i) } } if testing.Short() { t.Skip("skipping tests which allocate large pixel buffers") } } func TestDecodePalettedWithTransparency(t *testing.T) { // These bytes come from https://go.dev/issue/54325 // // Per the PNG spec, a PLTE chunk contains 3 (not 4) bytes per palette // entry: RGB (not RGBA). The alpha value comes from the optional tRNS // chunk. Here, the PLTE chunk (0x50, 0x4c, 0x54, 0x45, etc) has 16 entries // (0x30 = 48 bytes) and the tRNS chunk (0x74, 0x52, 0x4e, 0x53, etc) has 1 // entry (0x01 = 1 byte) that sets the first palette entry's alpha to zero. // // Both Decode and DecodeConfig should pick up that the first palette // entry's alpha is zero. src := []byte{ 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x04, 0x03, 0x00, 0x00, 0x00, 0x81, 0x54, 0x67, 0xc7, 0x00, 0x00, 0x00, 0x30, 0x50, 0x4c, 0x54, 0x45, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x00, 0x23, 0x27, 0x7b, 0xb1, 0x2d, 0x0a, 0x49, 0x3f, 0x19, 0x78, 0x5f, 0xcd, 0xe4, 0x69, 0x69, 0xe4, 0x71, 0x59, 0x53, 0x80, 0x11, 0x14, 0x8b, 0x00, 0xa9, 0x8d, 0x95, 0xcb, 0x99, 0x2f, 0x6b, 0xd7, 0x29, 0x91, 0xd7, 0x7b, 0xba, 0xff, 0xe3, 0xd7, 0x13, 0xc6, 0xd3, 0x58, 0x00, 0x00, 0x00, 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66, 0x00, 0x00, 0x00, 0xfd, 0x49, 0x44, 0x41, 0x54, 0x28, 0xcf, 0x63, 0x60, 0x00, 0x83, 0x55, 0x0c, 0x68, 0x60, 0x9d, 0x02, 0x9a, 0x80, 0xde, 0x23, 0x74, 0x15, 0xef, 0x50, 0x94, 0x70, 0x2d, 0xd2, 0x7b, 0x87, 0xa2, 0x84, 0xeb, 0xee, 0xbb, 0x77, 0x6f, 0x51, 0x94, 0xe8, 0xbd, 0x7d, 0xf7, 0xee, 0x12, 0xb2, 0x80, 0xd2, 0x3d, 0x54, 0x01, 0x26, 0x10, 0x1f, 0x59, 0x40, 0x0f, 0xc8, 0xd7, 0x7e, 0x84, 0x70, 0x1c, 0xd7, 0xba, 0xb7, 0x4a, 0xda, 0xda, 0x77, 0x11, 0xf6, 0xac, 0x5a, 0xa5, 0xf4, 0xf9, 0xbf, 0xfd, 0x3d, 0x24, 0x6b, 0x98, 0x94, 0xf4, 0xff, 0x7f, 0x52, 0x42, 0x16, 0x30, 0x0e, 0xd9, 0xed, 0x6a, 0x8c, 0xec, 0x10, 0x65, 0x53, 0x97, 0x60, 0x23, 0x64, 0x1d, 0x8a, 0x2e, 0xc6, 0x2e, 0x42, 0x08, 0x3d, 0x4c, 0xca, 0x81, 0xc1, 0x82, 0xa6, 0xa2, 0x46, 0x08, 0x3d, 0x4a, 0xa1, 0x82, 0xc6, 0x82, 0xa1, 0x4a, 0x08, 0x3d, 0xfa, 0xa6, 0x81, 0xa1, 0xa2, 0xc1, 0x9f, 0x10, 0x66, 0xd4, 0x2b, 0x87, 0x0a, 0x86, 0x1a, 0x7d, 0x57, 0x80, 0x9b, 0x99, 0xaf, 0x62, 0x1a, 0x1a, 0xec, 0xf0, 0x0d, 0x66, 0x2a, 0x7b, 0x5a, 0xba, 0xd2, 0x64, 0x63, 0x4b, 0xa6, 0xb2, 0xb4, 0x02, 0xa8, 0x12, 0xb5, 0x24, 0xa5, 0x99, 0x2e, 0x33, 0x95, 0xd4, 0x92, 0x10, 0xee, 0xd0, 0x59, 0xb9, 0x6a, 0xd6, 0x21, 0x24, 0xb7, 0x33, 0x9d, 0x01, 0x01, 0x64, 0xbf, 0xac, 0x59, 0xb2, 0xca, 0xeb, 0x14, 0x92, 0x80, 0xd6, 0x9a, 0x53, 0x4a, 0x6b, 0x4e, 0x2d, 0x42, 0x52, 0xa1, 0x73, 0x28, 0x54, 0xe7, 0x90, 0x6a, 0x00, 0x92, 0x92, 0x45, 0xa1, 0x40, 0x84, 0x2c, 0xe0, 0xc4, 0xa0, 0xb2, 0x28, 0x14, 0xc1, 0x67, 0xe9, 0x50, 0x60, 0x60, 0xea, 0x70, 0x40, 
0x12, 0x00, 0x79, 0x54, 0x09, 0x22, 0x00, 0x00, 0x30, 0xf3, 0x52, 0x87, 0xc6, 0xe4, 0xbd, 0x70, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, } cfg, err := DecodeConfig(bytes.NewReader(src)) if err != nil { t.Fatalf("DecodeConfig: %v", err) } else if _, _, _, alpha := cfg.ColorModel.(color.Palette)[0].RGBA(); alpha != 0 { t.Errorf("DecodeConfig: got %d, want 0", alpha) } img, err := Decode(bytes.NewReader(src)) if err != nil { t.Fatalf("Decode: %v", err) } else if _, _, _, alpha := img.ColorModel().(color.Palette)[0].RGBA(); alpha != 0 { t.Errorf("Decode: got %d, want 0", alpha) } } func benchmarkDecode(b *testing.B, filename string, bytesPerPixel int) { data, err := os.ReadFile(filename) if err != nil { b.Fatal(err) } cfg, err := DecodeConfig(bytes.NewReader(data)) if err != nil { b.Fatal(err) } b.SetBytes(int64(cfg.Width * cfg.Height * bytesPerPixel)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { Decode(bytes.NewReader(data)) } } func BenchmarkDecodeGray(b *testing.B) { benchmarkDecode(b, "testdata/benchGray.png", 1) } func BenchmarkDecodeNRGBAGradient(b *testing.B) { benchmarkDecode(b, "testdata/benchNRGBA-gradient.png", 4) } func BenchmarkDecodeNRGBAOpaque(b *testing.B) { benchmarkDecode(b, "testdata/benchNRGBA-opaque.png", 4) } func BenchmarkDecodePaletted(b *testing.B) { benchmarkDecode(b, "testdata/benchPaletted.png", 1) } func BenchmarkDecodeRGB(b *testing.B) { benchmarkDecode(b, "testdata/benchRGB.png", 4) } func BenchmarkDecodeInterlacing(b *testing.B) { benchmarkDecode(b, "testdata/benchRGB-interlace.png", 4) }
go/src/image/png/reader_test.go/0
{ "file_path": "go/src/image/png/reader_test.go", "repo_id": "go", "token_count": 15128 }
268
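The test above exercises a subtle point: for a paletted PNG, per-entry alpha comes from the optional tRNS chunk, and both DecodeConfig and Decode must surface it through the palette. Below is a minimal standalone sketch of the same inspection using only the public image/png and image/color APIs; reportPaletteAlpha is a made-up helper name, and src stands in for any paletted-with-tRNS PNG bytes such as the slice in the test.

package main

import (
	"bytes"
	"fmt"
	"image/color"
	"image/png"
	"log"
)

// reportPaletteAlpha decodes only the PNG header and palette via DecodeConfig
// and prints the 16-bit alpha of each palette entry. For a paletted image with
// a tRNS chunk, entries covered by tRNS should report alpha < 0xffff.
func reportPaletteAlpha(src []byte) error {
	cfg, err := png.DecodeConfig(bytes.NewReader(src))
	if err != nil {
		return err
	}
	pal, ok := cfg.ColorModel.(color.Palette)
	if !ok {
		return fmt.Errorf("not a paletted PNG: color model is %T", cfg.ColorModel)
	}
	for i, c := range pal {
		_, _, _, alpha := c.RGBA()
		fmt.Printf("palette[%d]: alpha=%#04x\n", i, alpha)
	}
	return nil
}

func main() {
	// src would hold the bytes of a paletted PNG with a tRNS chunk,
	// for example the byte slice from the test above.
	var src []byte
	if err := reportPaletteAlpha(src); err != nil {
		log.Fatal(err)
	}
}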
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build ignore // Gen generates sais2.go by duplicating functions in sais.go // using different input types. // See the comment at the top of sais.go for details. package main import ( "bytes" "log" "os" "strings" ) func main() { log.SetPrefix("gen: ") log.SetFlags(0) data, err := os.ReadFile("sais.go") if err != nil { log.Fatal(err) } x := bytes.Index(data, []byte("\n\n")) if x < 0 { log.Fatal("cannot find blank line after copyright comment") } var buf bytes.Buffer buf.Write(data[:x]) buf.WriteString("\n\n// Code generated by go generate; DO NOT EDIT.\n\npackage suffixarray\n") for { x := bytes.Index(data, []byte("\nfunc ")) if x < 0 { break } data = data[x:] p := bytes.IndexByte(data, '(') if p < 0 { p = len(data) } name := string(data[len("\nfunc "):p]) x = bytes.Index(data, []byte("\n}\n")) if x < 0 { log.Fatalf("cannot find end of func %s", name) } fn := string(data[:x+len("\n}\n")]) data = data[x+len("\n}"):] if strings.HasSuffix(name, "_32") { buf.WriteString(fix32.Replace(fn)) } if strings.HasSuffix(name, "_8_32") { // x_8_32 -> x_8_64 done above fn = fix8_32.Replace(stripByteOnly(fn)) buf.WriteString(fn) buf.WriteString(fix32.Replace(fn)) } } if err := os.WriteFile("sais2.go", buf.Bytes(), 0666); err != nil { log.Fatal(err) } } var fix32 = strings.NewReplacer( "32", "64", "int32", "int64", ) var fix8_32 = strings.NewReplacer( "_8_32", "_32", "byte", "int32", ) func stripByteOnly(s string) string { lines := strings.SplitAfter(s, "\n") w := 0 for _, line := range lines { if !strings.Contains(line, "256") && !strings.Contains(line, "byte-only") { lines[w] = line w++ } } return strings.Join(lines[:w], "") }
go/src/index/suffixarray/gen.go/0
{ "file_path": "go/src/index/suffixarray/gen.go", "repo_id": "go", "token_count": 817 }
269
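gen.go above produces sais2.go purely by textual substitution: each function ending in _32 is cloned with strings.NewReplacer turning 32 into 64, and each _8_32 function additionally gets an int32 variant. A tiny sketch of that substitution idea on an isolated function body; the sum_32 text below is invented for illustration and is not taken from sais.go.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A made-up _32 helper standing in for one of the sais.go functions.
	src := `func sum_32(xs []int32) int32 {
	var t int32
	for _, x := range xs {
		t += x
	}
	return t
}`

	// The same substitution gen.go applies: every "32" becomes "64",
	// so the _32 suffix becomes _64 and int32 becomes int64.
	fix32 := strings.NewReplacer("32", "64", "int32", "int64")
	fmt.Println(fix32.Replace(src))
}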
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package abi

func FuncPCTestFn()

var FuncPCTestFnAddr uintptr // address of FuncPCTestFn, directly retrieved from assembly

//go:noinline
func FuncPCTest() uintptr {
	return FuncPCABI0(FuncPCTestFn)
}
go/src/internal/abi/export_test.go/0
{ "file_path": "go/src/internal/abi/export_test.go", "repo_id": "go", "token_count": 123 }
270
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "go_asm.h" #include "textflag.h" TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56 // X10 = a_base // X11 = a_len // X12 = a_cap (unused) // X13 = b_base (want in X12) // X14 = b_len (want in X13) // X15 = b_cap (unused) MOV X13, X12 MOV X14, X13 JMP compare<>(SB) TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40 // X10 = a_base // X11 = a_len // X12 = b_base // X13 = b_len JMP compare<>(SB) // On entry: // X10 points to start of a // X11 length of a // X12 points to start of b // X13 length of b // for non-regabi X14 points to the address to store the return value (-1/0/1) // for regabi the return value in X10 TEXT compare<>(SB),NOSPLIT|NOFRAME,$0 BEQ X10, X12, cmp_len MOV X11, X5 BGE X13, X5, use_a_len // X5 = min(len(a), len(b)) MOV X13, X5 use_a_len: BEQZ X5, cmp_len MOV $32, X6 BLT X5, X6, check8_unaligned // Check alignment - if alignment differs we have to do one byte at a time. AND $7, X10, X7 AND $7, X12, X8 BNE X7, X8, check8_unaligned BEQZ X7, compare32 // Check one byte at a time until we reach 8 byte alignment. SUB X7, X0, X7 ADD $8, X7, X7 SUB X7, X5, X5 align: SUB $1, X7 MOVBU 0(X10), X8 MOVBU 0(X12), X9 BNE X8, X9, cmp ADD $1, X10 ADD $1, X12 BNEZ X7, align check32: // X6 contains $32 BLT X5, X6, compare16 compare32: MOV 0(X10), X15 MOV 0(X12), X16 MOV 8(X10), X17 MOV 8(X12), X18 BNE X15, X16, cmp8a BNE X17, X18, cmp8b MOV 16(X10), X15 MOV 16(X12), X16 MOV 24(X10), X17 MOV 24(X12), X18 BNE X15, X16, cmp8a BNE X17, X18, cmp8b ADD $32, X10 ADD $32, X12 SUB $32, X5 BGE X5, X6, compare32 BEQZ X5, cmp_len check16: MOV $16, X6 BLT X5, X6, check8_unaligned compare16: MOV 0(X10), X15 MOV 0(X12), X16 MOV 8(X10), X17 MOV 8(X12), X18 BNE X15, X16, cmp8a BNE X17, X18, cmp8b ADD $16, X10 ADD $16, X12 SUB $16, X5 BEQZ X5, cmp_len check8_unaligned: MOV $8, X6 BLT X5, X6, check4_unaligned compare8_unaligned: MOVBU 0(X10), X8 MOVBU 1(X10), X15 MOVBU 2(X10), X17 MOVBU 3(X10), X19 MOVBU 4(X10), X21 MOVBU 5(X10), X23 MOVBU 6(X10), X25 MOVBU 7(X10), X29 MOVBU 0(X12), X9 MOVBU 1(X12), X16 MOVBU 2(X12), X18 MOVBU 3(X12), X20 MOVBU 4(X12), X22 MOVBU 5(X12), X24 MOVBU 6(X12), X28 MOVBU 7(X12), X30 BNE X8, X9, cmp1a BNE X15, X16, cmp1b BNE X17, X18, cmp1c BNE X19, X20, cmp1d BNE X21, X22, cmp1e BNE X23, X24, cmp1f BNE X25, X28, cmp1g BNE X29, X30, cmp1h ADD $8, X10 ADD $8, X12 SUB $8, X5 BGE X5, X6, compare8_unaligned BEQZ X5, cmp_len check4_unaligned: MOV $4, X6 BLT X5, X6, compare1 compare4_unaligned: MOVBU 0(X10), X8 MOVBU 1(X10), X15 MOVBU 2(X10), X17 MOVBU 3(X10), X19 MOVBU 0(X12), X9 MOVBU 1(X12), X16 MOVBU 2(X12), X18 MOVBU 3(X12), X20 BNE X8, X9, cmp1a BNE X15, X16, cmp1b BNE X17, X18, cmp1c BNE X19, X20, cmp1d ADD $4, X10 ADD $4, X12 SUB $4, X5 BGE X5, X6, compare4_unaligned compare1: BEQZ X5, cmp_len MOVBU 0(X10), X8 MOVBU 0(X12), X9 BNE X8, X9, cmp ADD $1, X10 ADD $1, X12 SUB $1, X5 JMP compare1 // Compare 8 bytes of memory in X15/X16 that are known to differ. cmp8a: MOV X15, X17 MOV X16, X18 // Compare 8 bytes of memory in X17/X18 that are known to differ. 
cmp8b: MOV $0xff, X19 cmp8_loop: AND X17, X19, X8 AND X18, X19, X9 BNE X8, X9, cmp SLLI $8, X19 JMP cmp8_loop cmp1a: SLTU X9, X8, X5 SLTU X8, X9, X6 JMP cmp_ret cmp1b: SLTU X16, X15, X5 SLTU X15, X16, X6 JMP cmp_ret cmp1c: SLTU X18, X17, X5 SLTU X17, X18, X6 JMP cmp_ret cmp1d: SLTU X20, X19, X5 SLTU X19, X20, X6 JMP cmp_ret cmp1e: SLTU X22, X21, X5 SLTU X21, X22, X6 JMP cmp_ret cmp1f: SLTU X24, X23, X5 SLTU X23, X24, X6 JMP cmp_ret cmp1g: SLTU X28, X25, X5 SLTU X25, X28, X6 JMP cmp_ret cmp1h: SLTU X30, X29, X5 SLTU X29, X30, X6 JMP cmp_ret cmp_len: MOV X11, X8 MOV X13, X9 cmp: SLTU X9, X8, X5 SLTU X8, X9, X6 cmp_ret: SUB X5, X6, X10 RET
go/src/internal/bytealg/compare_riscv64.s/0
{ "file_path": "go/src/internal/bytealg/compare_riscv64.s", "repo_id": "go", "token_count": 2386 }
271
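The assembly above implements the usual three-way comparison contract (-1, 0, +1): compare the first min(len(a), len(b)) bytes, and fall back to comparing lengths when the common prefix is equal; the aligned word-at-a-time and unrolled paths are only optimizations of that rule. A portable Go sketch of the same contract, with compare as a hypothetical reference name:

package main

import "fmt"

// compare returns -1, 0, or +1, mirroring the contract of the assembly
// routine: byte-wise comparison over the common prefix, then by length.
func compare(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		switch {
		case a[i] < b[i]:
			return -1
		case a[i] > b[i]:
			return +1
		}
	}
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return +1
	}
	return 0
}

func main() {
	fmt.Println(compare([]byte("abc"), []byte("abd"))) // -1
	fmt.Println(compare([]byte("abc"), []byte("ab")))  // +1
	fmt.Println(compare([]byte("abc"), []byte("abc"))) // 0
}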
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "go_asm.h" #include "textflag.h" #define REGCTXT R29 // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25 BEQ R4, R5, eq ADDV R4, R6, R7 PCALIGN $16 loop: BNE R4, R7, test MOVV $1, R4 RET test: MOVBU (R4), R9 ADDV $1, R4 MOVBU (R5), R10 ADDV $1, R5 BEQ R9, R10, loop MOVB R0, R4 RET eq: MOVV $1, R4 RET // memequal_varlen(a, b unsafe.Pointer) bool TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$40-17 BEQ R4, R5, eq MOVV 8(REGCTXT), R6 // compiler stores size at offset 8 in the closure MOVV R4, 8(R3) MOVV R5, 16(R3) MOVV R6, 24(R3) JAL runtime·memequal(SB) MOVBU 32(R3), R4 RET eq: MOVV $1, R4 RET
go/src/internal/bytealg/equal_loong64.s/0
{ "file_path": "go/src/internal/bytealg/equal_loong64.s", "repo_id": "go", "token_count": 441 }
272
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bytealg

import "internal/cpu"

const MaxBruteForce = 64

func init() {
	// Note: we're kind of lucky that this flag is available at this point.
	// The runtime sets HasVX when processing auxv records, and that happens
	// to happen *before* running the init functions of packages that
	// the runtime depends on.
	// TODO: it would really be nicer for internal/cpu to figure out this
	// flag by itself. Then we wouldn't need to depend on quirks of
	// early startup initialization order.
	if cpu.S390X.HasVX {
		MaxLen = 64
	}
}

// Cutover reports the number of failures of IndexByte we should tolerate
// before switching over to Index.
// n is the number of bytes processed so far.
// See the bytes.Index implementation for details.
func Cutover(n int) int {
	// 1 error per 8 characters, plus a few slop to start.
	return (n + 16) / 8
}
go/src/internal/bytealg/index_s390x.go/0
{ "file_path": "go/src/internal/bytealg/index_s390x.go", "repo_id": "go", "token_count": 283 }
273
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package byteorder provides functions for decoding and encoding // little and big endian integer types from/to byte slices. package byteorder func LeUint16(b []byte) uint16 { _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 return uint16(b[0]) | uint16(b[1])<<8 } func LePutUint16(b []byte, v uint16) { _ = b[1] // early bounds check to guarantee safety of writes below b[0] = byte(v) b[1] = byte(v >> 8) } func LeAppendUint16(b []byte, v uint16) []byte { return append(b, byte(v), byte(v>>8), ) } func LeUint32(b []byte) uint32 { _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 } func LePutUint32(b []byte, v uint32) { _ = b[3] // early bounds check to guarantee safety of writes below b[0] = byte(v) b[1] = byte(v >> 8) b[2] = byte(v >> 16) b[3] = byte(v >> 24) } func LeAppendUint32(b []byte, v uint32) []byte { return append(b, byte(v), byte(v>>8), byte(v>>16), byte(v>>24), ) } func LeUint64(b []byte) uint64 { _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 } func LePutUint64(b []byte, v uint64) { _ = b[7] // early bounds check to guarantee safety of writes below b[0] = byte(v) b[1] = byte(v >> 8) b[2] = byte(v >> 16) b[3] = byte(v >> 24) b[4] = byte(v >> 32) b[5] = byte(v >> 40) b[6] = byte(v >> 48) b[7] = byte(v >> 56) } func LeAppendUint64(b []byte, v uint64) []byte { return append(b, byte(v), byte(v>>8), byte(v>>16), byte(v>>24), byte(v>>32), byte(v>>40), byte(v>>48), byte(v>>56), ) } func BeUint16(b []byte) uint16 { _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 return uint16(b[1]) | uint16(b[0])<<8 } func BePutUint16(b []byte, v uint16) { _ = b[1] // early bounds check to guarantee safety of writes below b[0] = byte(v >> 8) b[1] = byte(v) } func BeAppendUint16(b []byte, v uint16) []byte { return append(b, byte(v>>8), byte(v), ) } func BeUint32(b []byte) uint32 { _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 } func BePutUint32(b []byte, v uint32) { _ = b[3] // early bounds check to guarantee safety of writes below b[0] = byte(v >> 24) b[1] = byte(v >> 16) b[2] = byte(v >> 8) b[3] = byte(v) } func BeAppendUint32(b []byte, v uint32) []byte { return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v), ) } func BeUint64(b []byte) uint64 { _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 } func BePutUint64(b []byte, v uint64) { _ = b[7] // early bounds check to guarantee safety of writes below b[0] = byte(v >> 56) b[1] = byte(v >> 48) b[2] = byte(v >> 40) b[3] = byte(v >> 32) b[4] = byte(v >> 24) b[5] = byte(v >> 16) b[6] = byte(v >> 8) b[7] = byte(v) } func BeAppendUint64(b []byte, v uint64) []byte { return append(b, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v), ) }
go/src/internal/byteorder/byteorder.go/0
{ "file_path": "go/src/internal/byteorder/byteorder.go", "repo_id": "go", "token_count": 1627 }
274
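internal/byteorder is not importable outside the standard library, but the public encoding/binary package exposes the equivalent fixed-width little- and big-endian operations. A small round-trip sketch showing the byte layouts the functions above produce; the value 0x11223344 is arbitrary.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const v uint32 = 0x11223344

	le := make([]byte, 4)
	be := make([]byte, 4)
	binary.LittleEndian.PutUint32(le, v) // like LePutUint32: low byte first
	binary.BigEndian.PutUint32(be, v)    // like BePutUint32: high byte first

	fmt.Printf("little-endian: % x\n", le) // 44 33 22 11
	fmt.Printf("big-endian:    % x\n", be) // 11 22 33 44

	// Round trip back to the original value.
	fmt.Println(binary.LittleEndian.Uint32(le) == v) // true
	fmt.Println(binary.BigEndian.Uint32(be) == v)    // true
}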
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cfile

import "internal/runtime/exithook"

// InitHook is invoked from the main package "init" routine in
// programs built with "-cover". This function is intended to be
// called only by the compiler (via runtime/coverage.initHook).
//
// If 'istest' is false, it indicates we're building a regular program
// ("go build -cover ..."), in which case we immediately try to write
// out the meta-data file, and register emitCounterData as an exit
// hook.
//
// If 'istest' is true (indicating that the program in question is a
// Go test binary), then we tentatively queue up both emitMetaData and
// emitCounterData as exit hooks. In the normal case (e.g. regular "go
// test -cover" run) the testmain.go boilerplate will run at the end
// of the test, write out the coverage percentage, and then invoke
// MarkProfileEmitted to indicate that no more work needs to be
// done. If however that call is never made, this is a sign that the
// test binary is being used as a replacement binary for the tool
// being tested, hence we do want to run exit hooks when the program
// terminates.
func InitHook(istest bool) {
	// Note: hooks are run in reverse registration order, so
	// register the counter data hook before the meta-data hook
	// (in the case where two hooks are needed).
	exithook.Add(exithook.Hook{F: emitCounterData, RunOnFailure: true})
	if istest {
		exithook.Add(exithook.Hook{F: emitMetaData, RunOnFailure: true})
	} else {
		emitMetaData()
	}
}
go/src/internal/coverage/cfile/hooks.go/0
{ "file_path": "go/src/internal/coverage/cfile/hooks.go", "repo_id": "go", "token_count": 457 }
275
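InitHook above depends on exit hooks running in reverse registration order, which is why the counter-data hook is registered before the meta-data hook. A standalone sketch of that ordering property using a local hook list; the hook type, add, and run below are illustrative stand-ins, not the real internal/runtime/exithook API.

package main

import "fmt"

// hook mimics the shape of an exit hook: a function plus a flag saying
// whether it should also run when the program exits with a failure.
type hook struct {
	f            func()
	runOnFailure bool
}

var hooks []hook

func add(h hook) { hooks = append(hooks, h) }

// run executes hooks in reverse registration order, the property that
// InitHook relies on when it registers emitCounterData first.
func run(failed bool) {
	for i := len(hooks) - 1; i >= 0; i-- {
		h := hooks[i]
		if failed && !h.runOnFailure {
			continue
		}
		h.f()
	}
}

func main() {
	add(hook{f: func() { fmt.Println("emit counter data") }, runOnFailure: true})
	add(hook{f: func() { fmt.Println("emit meta data") }, runOnFailure: true})
	run(false)
	// Prints "emit meta data" first, then "emit counter data".
}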
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package coverage // Types and constants related to the output files written // by code coverage tooling. When a coverage-instrumented binary // is run, it emits two output files: a meta-data output file, and // a counter data output file. //..................................................................... // // Meta-data definitions: // // The meta-data file is composed of a file header, a series of // meta-data blobs/sections (one per instrumented package), and an offsets // area storing the offsets of each section. Format of the meta-data // file looks like: // // --header---------- // | magic: [4]byte magic string // | version // | total length of meta-data file in bytes // | numPkgs: number of package entries in file // | hash: [16]byte hash of entire meta-data payload // | offset to string table section // | length of string table // | number of entries in string table // | counter mode // | counter granularity // --package offsets table------ // <offset to pkg 0> // <offset to pkg 1> // ... // --package lengths table------ // <length of pkg 0> // <length of pkg 1> // ... // --string table------ // <uleb128 len> 8 // <data> "somestring" // ... // --package payloads------ // <meta-symbol for pkg 0> // <meta-symbol for pkg 1> // ... // // Each package payload is a stand-alone blob emitted by the compiler, // and does not depend on anything else in the meta-data file. In // particular, each blob has it's own string table. Note that the // file-level string table is expected to be very short (most strings // will be in the meta-data blobs themselves). // CovMetaMagic holds the magic string for a meta-data file. var CovMetaMagic = [4]byte{'\x00', '\x63', '\x76', '\x6d'} // MetaFilePref is a prefix used when emitting meta-data files; these // files are of the form "covmeta.<hash>", where hash is a hash // computed from the hashes of all the package meta-data symbols in // the program. const MetaFilePref = "covmeta" // MetaFileVersion contains the current (most recent) meta-data file version. const MetaFileVersion = 1 // MetaFileHeader stores file header information for a meta-data file. type MetaFileHeader struct { Magic [4]byte Version uint32 TotalLength uint64 Entries uint64 MetaFileHash [16]byte StrTabOffset uint32 StrTabLength uint32 CMode CounterMode CGranularity CounterGranularity _ [6]byte // padding } // MetaSymbolHeader stores header information for a single // meta-data blob, e.g. the coverage meta-data payload // computed for a given Go package. 
type MetaSymbolHeader struct { Length uint32 // size of meta-symbol payload in bytes PkgName uint32 // string table index PkgPath uint32 // string table index ModulePath uint32 // string table index MetaHash [16]byte _ byte // currently unused _ [3]byte // padding NumFiles uint32 NumFuncs uint32 } const CovMetaHeaderSize = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 // keep in sync with above // As an example, consider the following Go package: // // 01: package p // 02: // 03: var v, w, z int // 04: // 05: func small(x, y int) int { // 06: v++ // 07: // comment // 08: if y == 0 { // 09: return x // 10: } // 11: return (x << 1) ^ (9 / y) // 12: } // 13: // 14: func Medium(q, r int) int { // 15: s1 := small(q, r) // 16: z += s1 // 17: s2 := small(r, q) // 18: w -= s2 // 19: return w + z // 20: } // // The meta-data blob for the single package above might look like the // following: // // -- MetaSymbolHeader header---------- // | size: size of this blob in bytes // | packagepath: <path to p> // | modulepath: <modpath for p> // | nfiles: 1 // | nfunctions: 2 // --func offsets table------ // <offset to func 0> // <offset to func 1> // --string table (contains all files and functions)------ // | <uleb128 len> 4 // | <data> "p.go" // | <uleb128 len> 5 // | <data> "small" // | <uleb128 len> 6 // | <data> "Medium" // --func 0------ // | <uleb128> num units: 3 // | <uleb128> func name: S1 (index into string table) // | <uleb128> file: S0 (index into string table) // | <unit 0>: S0 L6 L8 2 // | <unit 1>: S0 L9 L9 1 // | <unit 2>: S0 L11 L11 1 // --func 1------ // | <uleb128> num units: 1 // | <uleb128> func name: S2 (index into string table) // | <uleb128> file: S0 (index into string table) // | <unit 0>: S0 L15 L19 5 // ---end----------- // The following types and constants used by the meta-data encoder/decoder. // FuncDesc encapsulates the meta-data definitions for a single Go function. // This version assumes that we're looking at a function before inlining; // if we want to capture a post-inlining view of the world, the // representations of source positions would need to be a good deal more // complicated. type FuncDesc struct { Funcname string Srcfile string Units []CoverableUnit Lit bool // true if this is a function literal } // CoverableUnit describes the source characteristics of a single // program unit for which we want to gather coverage info. Coverable // units are either "simple" or "intraline"; a "simple" coverable unit // corresponds to a basic block (region of straight-line code with no // jumps or control transfers). An "intraline" unit corresponds to a // logical clause nested within some other simple unit. A simple unit // will have a zero Parent value; for an intraline unit NxStmts will // be zero and Parent will be set to 1 plus the index of the // containing simple statement. Example: // // L7: q := 1 // L8: x := (y == 101 || launch() == false) // L9: r := x * 2 // // For the code above we would have three simple units (one for each // line), then an intraline unit describing the "launch() == false" // clause in line 8, with Parent pointing to the index of the line 8 // unit in the units array. // // Note: in the initial version of the coverage revamp, only simple // units will be in use. type CoverableUnit struct { StLine, StCol uint32 EnLine, EnCol uint32 NxStmts uint32 Parent uint32 } // CounterMode tracks the "flavor" of the coverage counters being // used in a given coverage-instrumented program. 
type CounterMode uint8 const ( CtrModeInvalid CounterMode = iota CtrModeSet // "set" mode CtrModeCount // "count" mode CtrModeAtomic // "atomic" mode CtrModeRegOnly // registration-only pseudo-mode CtrModeTestMain // testmain pseudo-mode ) func (cm CounterMode) String() string { switch cm { case CtrModeSet: return "set" case CtrModeCount: return "count" case CtrModeAtomic: return "atomic" case CtrModeRegOnly: return "regonly" case CtrModeTestMain: return "testmain" } return "<invalid>" } func ParseCounterMode(mode string) CounterMode { var cm CounterMode switch mode { case "set": cm = CtrModeSet case "count": cm = CtrModeCount case "atomic": cm = CtrModeAtomic case "regonly": cm = CtrModeRegOnly case "testmain": cm = CtrModeTestMain default: cm = CtrModeInvalid } return cm } // CounterGranularity tracks the granularity of the coverage counters being // used in a given coverage-instrumented program. type CounterGranularity uint8 const ( CtrGranularityInvalid CounterGranularity = iota CtrGranularityPerBlock CtrGranularityPerFunc ) func (cm CounterGranularity) String() string { switch cm { case CtrGranularityPerBlock: return "perblock" case CtrGranularityPerFunc: return "perfunc" } return "<invalid>" } // Name of file within the "go test -cover" temp coverdir directory // containing a list of meta-data files for packages being tested // in a "go test -coverpkg=... ..." run. This constant is shared // by the Go command and by the coverage runtime. const MetaFilesFileName = "metafiles.txt" // MetaFileCollection contains information generated by the Go command and // the read in by coverage test support functions within an executing // "go test -cover" binary. type MetaFileCollection struct { ImportPaths []string MetaFileFragments []string } //..................................................................... // // Counter data definitions: // // A counter data file is composed of a file header followed by one or // more "segments" (each segment representing a given run or partial // run of a give binary) followed by a footer. // CovCounterMagic holds the magic string for a coverage counter-data file. var CovCounterMagic = [4]byte{'\x00', '\x63', '\x77', '\x6d'} // CounterFileVersion stores the most recent counter data file version. const CounterFileVersion = 1 // CounterFileHeader stores files header information for a counter-data file. type CounterFileHeader struct { Magic [4]byte Version uint32 MetaHash [16]byte CFlavor CounterFlavor BigEndian bool _ [6]byte // padding } // CounterSegmentHeader encapsulates information about a specific // segment in a counter data file, which at the moment contains // counters data from a single execution of a coverage-instrumented // program. Following the segment header will be the string table and // args table, and then (possibly) padding bytes to bring the byte // size of the preamble up to a multiple of 4. Immediately following // that will be the counter payloads. // // The "args" section of a segment is used to store annotations // describing where the counter data came from; this section is // basically a series of key-value pairs (can be thought of as an // encoded 'map[string]string'). At the moment we only write os.Args() // data to this section, using pairs of the form "argc=<integer>", // "argv0=<os.Args[0]>", "argv1=<os.Args[1]>", and so on. In the // future the args table may also include things like GOOS/GOARCH // values, and/or tags indicating which tests were run to generate the // counter data. 
type CounterSegmentHeader struct { FcnEntries uint64 StrTabLen uint32 ArgsLen uint32 } // CounterFileFooter appears at the tail end of a counter data file, // and stores the number of segments it contains. type CounterFileFooter struct { Magic [4]byte _ [4]byte // padding NumSegments uint32 _ [4]byte // padding } // CounterFilePref is the file prefix used when emitting coverage data // output files. CounterFileTemplate describes the format of the file // name: prefix followed by meta-file hash followed by process ID // followed by emit UnixNanoTime. const CounterFilePref = "covcounters" const CounterFileTempl = "%s.%x.%d.%d" const CounterFileRegexp = `^%s\.(\S+)\.(\d+)\.(\d+)+$` // CounterFlavor describes how function and counters are // stored/represented in the counter section of the file. type CounterFlavor uint8 const ( // "Raw" representation: all values (pkg ID, func ID, num counters, // and counters themselves) are stored as uint32's. CtrRaw CounterFlavor = iota + 1 // "ULeb" representation: all values (pkg ID, func ID, num counters, // and counters themselves) are stored with ULEB128 encoding. CtrULeb128 ) func Round4(x int) int { return (x + 3) &^ 3 } //..................................................................... // // Runtime counter data definitions. // // At runtime within a coverage-instrumented program, the "counters" // object we associated with instrumented function can be thought of // as a struct of the following form: // // struct { // numCtrs uint32 // pkgid uint32 // funcid uint32 // counterArray [numBlocks]uint32 // } // // where "numCtrs" is the number of blocks / coverable units within the // function, "pkgid" is the unique index assigned to this package by // the runtime, "funcid" is the index of this function within its containing // package, and "counterArray" stores the actual counters. // // The counter variable itself is created not as a struct but as a flat // array of uint32's; we then use the offsets below to index into it. const NumCtrsOffset = 0 const PkgIdOffset = 1 const FuncIdOffset = 2 const FirstCtrOffset = 3
go/src/internal/coverage/defs.go/0
{ "file_path": "go/src/internal/coverage/defs.go", "repo_id": "go", "token_count": 3909 }
276
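The headers above are plain fixed-layout structs, so one convenient way to sketch reading or writing them is encoding/binary; the actual readers and writers live in other internal/coverage packages and do more validation (magic, version, endianness), so treat the following only as a layout illustration. counterFileHeader is a local copy with concrete integer types, and the flavor value used is made up.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
)

// counterFileHeader is a local copy of the CounterFileHeader layout above;
// the field types are chosen so the struct is fixed-size for encoding/binary.
type counterFileHeader struct {
	Magic     [4]byte
	Version   uint32
	MetaHash  [16]byte
	CFlavor   uint8
	BigEndian bool
	_         [6]byte // padding
}

func main() {
	// Encode a header, then decode it back. The meta hash is left zero here.
	in := counterFileHeader{
		Magic:   [4]byte{'\x00', '\x63', '\x77', '\x6d'}, // CovCounterMagic
		Version: 1,
		CFlavor: 1, // CtrRaw
	}
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, &in); err != nil {
		log.Fatal(err)
	}

	var out counterFileHeader
	if err := binary.Read(&buf, binary.LittleEndian, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("magic=%q version=%d flavor=%d\n", out.Magic[:], out.Version, out.CFlavor)
}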
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cpu implements processor feature detection // used by the Go standard library. package cpu import _ "unsafe" // for linkname // DebugOptions is set to true by the runtime if the OS supports reading // GODEBUG early in runtime startup. // This should not be changed after it is initialized. var DebugOptions bool // CacheLinePad is used to pad structs to avoid false sharing. type CacheLinePad struct{ _ [CacheLinePadSize]byte } // CacheLineSize is the CPU's assumed cache line size. // There is currently no runtime detection of the real cache line size // so we use the constant per GOARCH CacheLinePadSize as an approximation. var CacheLineSize uintptr = CacheLinePadSize // The booleans in X86 contain the correspondingly named cpuid feature bit. // HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. var X86 struct { _ CacheLinePad HasAES bool HasADX bool HasAVX bool HasAVX2 bool HasAVX512F bool HasAVX512BW bool HasAVX512VL bool HasBMI1 bool HasBMI2 bool HasERMS bool HasFSRM bool HasFMA bool HasOSXSAVE bool HasPCLMULQDQ bool HasPOPCNT bool HasRDTSCP bool HasSHA bool HasSSE3 bool HasSSSE3 bool HasSSE41 bool HasSSE42 bool _ CacheLinePad } // The booleans in ARM contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. var ARM struct { _ CacheLinePad HasVFPv4 bool HasIDIVA bool HasV7Atomics bool _ CacheLinePad } // The booleans in ARM64 contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. var ARM64 struct { _ CacheLinePad HasAES bool HasPMULL bool HasSHA1 bool HasSHA2 bool HasSHA512 bool HasCRC32 bool HasATOMICS bool HasCPUID bool HasDIT bool IsNeoverse bool _ CacheLinePad } var MIPS64X struct { _ CacheLinePad HasMSA bool // MIPS SIMD architecture _ CacheLinePad } // For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, // since there are no optional categories. There are some exceptions that also // require kernel support to work (darn, scv), so there are feature bits for // those as well. The minimum processor requirement is POWER8 (ISA 2.07). // The struct is padded to avoid false sharing. 
var PPC64 struct { _ CacheLinePad HasDARN bool // Hardware random number generator (requires kernel enablement) HasSCV bool // Syscall vectored (requires kernel enablement) IsPOWER8 bool // ISA v2.07 (POWER8) IsPOWER9 bool // ISA v3.00 (POWER9) IsPOWER10 bool // ISA v3.1 (POWER10) _ CacheLinePad } var S390X struct { _ CacheLinePad HasZARCH bool // z architecture mode is active [mandatory] HasSTFLE bool // store facility list extended [mandatory] HasLDISP bool // long (20-bit) displacements [mandatory] HasEIMM bool // 32-bit immediates [mandatory] HasDFP bool // decimal floating point HasETF3EH bool // ETF-3 enhanced HasMSA bool // message security assist (CPACF) HasAES bool // KM-AES{128,192,256} functions HasAESCBC bool // KMC-AES{128,192,256} functions HasAESCTR bool // KMCTR-AES{128,192,256} functions HasAESGCM bool // KMA-GCM-AES{128,192,256} functions HasGHASH bool // KIMD-GHASH function HasSHA1 bool // K{I,L}MD-SHA-1 functions HasSHA256 bool // K{I,L}MD-SHA-256 functions HasSHA512 bool // K{I,L}MD-SHA-512 functions HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records. HasVXE bool // vector-enhancements facility 1 HasKDSA bool // elliptic curve functions HasECDSA bool // NIST curves HasEDDSA bool // Edwards curves _ CacheLinePad } // CPU feature variables are accessed by assembly code in various packages. //go:linkname X86 //go:linkname ARM //go:linkname ARM64 //go:linkname MIPS64X //go:linkname PPC64 //go:linkname S390X // Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, // before normal init functions are run. env is set by runtime if the OS supports // cpu feature options in GODEBUG. func Initialize(env string) { doinit() processOptions(env) } // options contains the cpu debug options that can be used in GODEBUG. // Options are arch dependent and are added by the arch specific doinit functions. // Features that are mandatory for the specific GOARCH should not be added to options // (e.g. SSE2 on amd64). var options []option // Option names should be lower case. e.g. avx instead of AVX. type option struct { Name string Feature *bool Specified bool // whether feature value was specified in GODEBUG Enable bool // whether feature should be enabled } // processOptions enables or disables CPU feature values based on the parsed env string. // The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2... // where feature names is one of the architecture specific list stored in the // cpu packages options variable and values are either 'on' or 'off'. // If env contains cpu.all=off then all cpu features referenced through the options // variable are disabled. Other feature names and values result in warning messages. func processOptions(env string) { field: for env != "" { field := "" i := indexByte(env, ',') if i < 0 { field, env = env, "" } else { field, env = env[:i], env[i+1:] } if len(field) < 4 || field[:4] != "cpu." { continue } i = indexByte(field, '=') if i < 0 { print("GODEBUG: no value specified for \"", field, "\"\n") continue } key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" var enable bool switch value { case "on": enable = true case "off": enable = false default: print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n") continue field } if key == "all" { for i := range options { options[i].Specified = true options[i].Enable = enable } continue field } for i := range options { if options[i].Name == key { options[i].Specified = true options[i].Enable = enable continue field } } print("GODEBUG: unknown cpu feature \"", key, "\"\n") } for _, o := range options { if !o.Specified { continue } if o.Enable && !*o.Feature { print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n") continue } *o.Feature = o.Enable } } // indexByte returns the index of the first instance of c in s, // or -1 if c is not present in s. // indexByte is semantically the same as [strings.IndexByte]. // We copy this function because "internal/cpu" should not have external dependencies. func indexByte(s string, c byte) int { for i := 0; i < len(s); i++ { if s[i] == c { return i } } return -1 }
go/src/internal/cpu/cpu.go/0
{ "file_path": "go/src/internal/cpu/cpu.go", "repo_id": "go", "token_count": 2584 }
277
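processOptions above accepts GODEBUG text of the form cpu.feature1=on,cpu.feature2=off (with cpu.all=off as a bulk switch) and warns about anything it does not recognize. A standalone sketch of parsing that shape into a map, independent of the internal options table; parseCPUOptions is a made-up name and the behavior is simplified (no warnings, no "all" handling).

package main

import (
	"fmt"
	"strings"
)

// parseCPUOptions extracts the "cpu.*" fields from a GODEBUG-style string
// and maps each feature name to true (on) or false (off). Other fields and
// unknown values are skipped.
func parseCPUOptions(env string) map[string]bool {
	opts := make(map[string]bool)
	for _, field := range strings.Split(env, ",") {
		name, value, ok := strings.Cut(field, "=")
		if !ok || !strings.HasPrefix(name, "cpu.") {
			continue
		}
		switch value {
		case "on":
			opts[strings.TrimPrefix(name, "cpu.")] = true
		case "off":
			opts[strings.TrimPrefix(name, "cpu.")] = false
		}
	}
	return opts
}

func main() {
	fmt.Println(parseCPUOptions("cpu.avx2=off,cpu.sse42=on,http2debug=1"))
	// map[avx2:false sse42:true]
}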
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !386 && !amd64 && !ppc64 && !ppc64le

package cpu

// Name returns the CPU name given by the vendor
// if it can be read directly from memory or by CPU instructions.
// If the CPU name can not be determined an empty string is returned.
//
// Implementations that use the Operating System (e.g. sysctl or /sys/)
// to gather CPU information for display should be placed in internal/sysinfo.
func Name() string {
	// "A CPU has no name".
	return ""
}
go/src/internal/cpu/cpu_no_name.go/0
{ "file_path": "go/src/internal/cpu/cpu_no_name.go", "repo_id": "go", "token_count": 171 }
278
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dag

// Transpose reverses all edges in g.
func (g *Graph) Transpose() {
	old := g.edges
	g.edges = make(map[string]map[string]bool)
	for _, n := range g.Nodes {
		g.edges[n] = make(map[string]bool)
	}
	for from, tos := range old {
		for to := range tos {
			g.edges[to][from] = true
		}
	}
}

// Topo returns a topological sort of g. This function is deterministic.
func (g *Graph) Topo() []string {
	topo := make([]string, 0, len(g.Nodes))
	marks := make(map[string]bool)

	var visit func(n string)
	visit = func(n string) {
		if marks[n] {
			return
		}
		for _, to := range g.Edges(n) {
			visit(to)
		}
		marks[n] = true
		topo = append(topo, n)
	}
	for _, root := range g.Nodes {
		visit(root)
	}
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	return topo
}

// TransitiveReduction removes edges from g that are transitively
// reachable. g must be transitively closed.
func (g *Graph) TransitiveReduction() {
	// For i -> j -> k, if i -> k exists, delete it.
	for _, i := range g.Nodes {
		for _, j := range g.Nodes {
			if g.HasEdge(i, j) {
				for _, k := range g.Nodes {
					if g.HasEdge(j, k) {
						g.DelEdge(i, k)
					}
				}
			}
		}
	}
}
go/src/internal/dag/alg.go/0
{ "file_path": "go/src/internal/dag/alg.go", "repo_id": "go", "token_count": 599 }
279
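Topo above is a depth-first, post-order topological sort made deterministic by walking g.Nodes in a fixed order and reversing the result. The same idea on a plain adjacency map, outside the internal Graph type; topo below is an illustrative reimplementation, not the package's API.

package main

import "fmt"

// topo returns a topological order of the DAG described by edges,
// visiting roots in the given order so the result is deterministic.
func topo(nodes []string, edges map[string][]string) []string {
	order := make([]string, 0, len(nodes))
	seen := make(map[string]bool)

	var visit func(n string)
	visit = func(n string) {
		if seen[n] {
			return
		}
		seen[n] = true
		for _, to := range edges[n] {
			visit(to)
		}
		order = append(order, n) // post-order: successors first
	}
	for _, n := range nodes {
		visit(n)
	}
	// Reverse so each node precedes the nodes it points to, matching the
	// convention of Topo above.
	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
		order[i], order[j] = order[j], order[i]
	}
	return order
}

func main() {
	nodes := []string{"a", "b", "c"}
	edges := map[string][]string{"a": {"b"}, "b": {"c"}}
	fmt.Println(topo(nodes, edges)) // [a b c]
}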
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fuzz import ( "bytes" "fmt" "os" "unsafe" ) // sharedMem manages access to a region of virtual memory mapped from a file, // shared between multiple processes. The region includes space for a header and // a value of variable length. // // When fuzzing, the coordinator creates a sharedMem from a temporary file for // each worker. This buffer is used to pass values to fuzz between processes. // Care must be taken to manage access to shared memory across processes; // sharedMem provides no synchronization on its own. See workerComm for an // explanation. type sharedMem struct { // f is the file mapped into memory. f *os.File // region is the mapped region of virtual memory for f. The content of f may // be read or written through this slice. region []byte // removeOnClose is true if the file should be deleted by Close. removeOnClose bool // sys contains OS-specific information. sys sharedMemSys } // sharedMemHeader stores metadata in shared memory. type sharedMemHeader struct { // count is the number of times the worker has called the fuzz function. // May be reset by coordinator. count int64 // valueLen is the number of bytes in region which should be read. valueLen int // randState and randInc hold the state of a pseudo-random number generator. randState, randInc uint64 // rawInMem is true if the region holds raw bytes, which occurs during // minimization. If true after the worker fails during minimization, this // indicates that an unrecoverable error occurred, and the region can be // used to retrieve the raw bytes that caused the error. rawInMem bool } // sharedMemSize returns the size needed for a shared memory buffer that can // contain values of the given size. func sharedMemSize(valueSize int) int { // TODO(jayconrod): set a reasonable maximum size per platform. return int(unsafe.Sizeof(sharedMemHeader{})) + valueSize } // sharedMemTempFile creates a new temporary file of the given size, then maps // it into memory. The file will be removed when the Close method is called. func sharedMemTempFile(size int) (m *sharedMem, err error) { // Create a temporary file. f, err := os.CreateTemp("", "fuzz-*") if err != nil { return nil, err } defer func() { if err != nil { f.Close() os.Remove(f.Name()) } }() // Resize it to the correct size. totalSize := sharedMemSize(size) if err := f.Truncate(int64(totalSize)); err != nil { return nil, err } // Map the file into memory. removeOnClose := true return sharedMemMapFile(f, totalSize, removeOnClose) } // header returns a pointer to metadata within the shared memory region. func (m *sharedMem) header() *sharedMemHeader { return (*sharedMemHeader)(unsafe.Pointer(&m.region[0])) } // valueRef returns the value currently stored in shared memory. The returned // slice points to shared memory; it is not a copy. func (m *sharedMem) valueRef() []byte { length := m.header().valueLen valueOffset := int(unsafe.Sizeof(sharedMemHeader{})) return m.region[valueOffset : valueOffset+length] } // valueCopy returns a copy of the value stored in shared memory. func (m *sharedMem) valueCopy() []byte { ref := m.valueRef() return bytes.Clone(ref) } // setValue copies the data in b into the shared memory buffer and sets // the length. len(b) must be less than or equal to the capacity of the buffer // (as returned by cap(m.value())). 
func (m *sharedMem) setValue(b []byte) { v := m.valueRef() if len(b) > cap(v) { panic(fmt.Sprintf("value length %d larger than shared memory capacity %d", len(b), cap(v))) } m.header().valueLen = len(b) copy(v[:cap(v)], b) } // setValueLen sets the length of the shared memory buffer returned by valueRef // to n, which may be at most the cap of that slice. // // Note that we can only store the length in the shared memory header. The full // slice header contains a pointer, which is likely only valid for one process, // since each process can map shared memory at a different virtual address. func (m *sharedMem) setValueLen(n int) { v := m.valueRef() if n > cap(v) { panic(fmt.Sprintf("length %d larger than shared memory capacity %d", n, cap(v))) } m.header().valueLen = n } // TODO(jayconrod): add method to resize the buffer. We'll need that when the // mutator can increase input length. Only the coordinator will be able to // do it, since we'll need to send a message to the worker telling it to // remap the file.
go/src/internal/fuzz/mem.go/0
{ "file_path": "go/src/internal/fuzz/mem.go", "repo_id": "go", "token_count": 1342 }
280
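The region above is just a fixed-size header followed by the value bytes, so the usable capacity is the mapped size minus unsafe.Sizeof(sharedMemHeader{}). A toy sketch of that header-plus-payload layout over an ordinary in-process byte slice (no mmap, no cross-process concerns); the header and region types below are simplified stand-ins, not the real worker protocol.

package main

import (
	"fmt"
	"unsafe"
)

// header mirrors the idea of sharedMemHeader: metadata stored at the front
// of the region, with valueLen describing how much of the rest is meaningful.
type header struct {
	count    int64
	valueLen int
}

type region struct {
	buf []byte
}

func (r *region) header() *header { return (*header)(unsafe.Pointer(&r.buf[0])) }

func (r *region) value() []byte {
	off := int(unsafe.Sizeof(header{}))
	return r.buf[off : off+r.header().valueLen]
}

func (r *region) setValue(b []byte) {
	off := int(unsafe.Sizeof(header{}))
	copy(r.buf[off:], b)
	r.header().valueLen = len(b)
}

func main() {
	r := &region{buf: make([]byte, int(unsafe.Sizeof(header{}))+64)}
	r.setValue([]byte("hello fuzzer"))
	fmt.Printf("%q (len %d)\n", r.value(), r.header().valueLen)
}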
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build ignore package main import ( "bytes" "fmt" "log" "os" "strings" ) var goarches []string func main() { data, err := os.ReadFile("../../go/build/syslist.go") if err != nil { log.Fatal(err) } const goarchPrefix = `var knownArch = map[string]bool{` inGOARCH := false for _, line := range strings.Split(string(data), "\n") { if strings.HasPrefix(line, goarchPrefix) { inGOARCH = true } else if inGOARCH && strings.HasPrefix(line, "}") { break } else if inGOARCH { goarch := strings.Fields(line)[0] goarch = strings.TrimPrefix(goarch, `"`) goarch = strings.TrimSuffix(goarch, `":`) goarches = append(goarches, goarch) } } for _, target := range goarches { if target == "amd64p32" { continue } var buf bytes.Buffer fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n") fmt.Fprintf(&buf, "//go:build %s\n\n", target) // must explicitly include target for bootstrapping purposes fmt.Fprintf(&buf, "package goarch\n\n") fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target) for _, goarch := range goarches { value := 0 if goarch == target { value = 1 } fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value) } err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666) if err != nil { log.Fatal(err) } } }
go/src/internal/goarch/gengoarch.go/0
{ "file_path": "go/src/internal/goarch/gengoarch.go", "repo_id": "go", "token_count": 629 }
281
// Code generated by mkconsts.go. DO NOT EDIT.

//go:build goexperiment.boringcrypto

package goexperiment

const BoringCrypto = true
const BoringCryptoInt = 1
go/src/internal/goexperiment/exp_boringcrypto_on.go/0
{ "file_path": "go/src/internal/goexperiment/exp_boringcrypto_on.go", "repo_id": "go", "token_count": 53 }
282
// Code generated by mkconsts.go. DO NOT EDIT.

//go:build goexperiment.preemptibleloops

package goexperiment

const PreemptibleLoops = true
const PreemptibleLoopsInt = 1
go/src/internal/goexperiment/exp_preemptibleloops_on.go/0
{ "file_path": "go/src/internal/goexperiment/exp_preemptibleloops_on.go", "repo_id": "go", "token_count": 56 }
283
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build gc package goroot import ( "os" "os/exec" "path/filepath" "strings" "sync" ) // IsStandardPackage reports whether path is a standard package, // given goroot and compiler. func IsStandardPackage(goroot, compiler, path string) bool { switch compiler { case "gc": dir := filepath.Join(goroot, "src", path) dirents, err := os.ReadDir(dir) if err != nil { return false } for _, dirent := range dirents { if strings.HasSuffix(dirent.Name(), ".go") { return true } } return false case "gccgo": return gccgoSearch.isStandard(path) default: panic("unknown compiler " + compiler) } } // gccgoSearch holds the gccgo search directories. type gccgoDirs struct { once sync.Once dirs []string } // gccgoSearch is used to check whether a gccgo package exists in the // standard library. var gccgoSearch gccgoDirs // init finds the gccgo search directories. If this fails it leaves dirs == nil. func (gd *gccgoDirs) init() { gccgo := os.Getenv("GCCGO") if gccgo == "" { gccgo = "gccgo" } bin, err := exec.LookPath(gccgo) if err != nil { return } allDirs, err := exec.Command(bin, "-print-search-dirs").Output() if err != nil { return } versionB, err := exec.Command(bin, "-dumpversion").Output() if err != nil { return } version := strings.TrimSpace(string(versionB)) machineB, err := exec.Command(bin, "-dumpmachine").Output() if err != nil { return } machine := strings.TrimSpace(string(machineB)) dirsEntries := strings.Split(string(allDirs), "\n") const prefix = "libraries: =" var dirs []string for _, dirEntry := range dirsEntries { if strings.HasPrefix(dirEntry, prefix) { dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix)) break } } if len(dirs) == 0 { return } var lastDirs []string for _, dir := range dirs { goDir := filepath.Join(dir, "go", version) if fi, err := os.Stat(goDir); err == nil && fi.IsDir() { gd.dirs = append(gd.dirs, goDir) goDir = filepath.Join(goDir, machine) if fi, err = os.Stat(goDir); err == nil && fi.IsDir() { gd.dirs = append(gd.dirs, goDir) } } if fi, err := os.Stat(dir); err == nil && fi.IsDir() { lastDirs = append(lastDirs, dir) } } gd.dirs = append(gd.dirs, lastDirs...) } // isStandard reports whether path is a standard library for gccgo. func (gd *gccgoDirs) isStandard(path string) bool { // Quick check: if the first path component has a '.', it's not // in the standard library. This skips most GOPATH directories. i := strings.Index(path, "/") if i < 0 { i = len(path) } if strings.Contains(path[:i], ".") { return false } if path == "unsafe" { // Special case. return true } gd.once.Do(gd.init) if gd.dirs == nil { // We couldn't find the gccgo search directories. // Best guess, since the first component did not contain // '.', is that this is a standard library package. return true } for _, dir := range gd.dirs { full := filepath.Join(dir, path) + ".gox" if fi, err := os.Stat(full); err == nil && !fi.IsDir() { return true } } return false }
go/src/internal/goroot/gc.go/0
{ "file_path": "go/src/internal/goroot/gc.go", "repo_id": "go", "token_count": 1237 }
284
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pkgbits import ( "encoding/binary" "errors" "fmt" "go/constant" "go/token" "io" "math/big" "os" "runtime" "strings" ) // A PkgDecoder provides methods for decoding a package's Unified IR // export data. type PkgDecoder struct { // version is the file format version. version uint32 // sync indicates whether the file uses sync markers. sync bool // pkgPath is the package path for the package to be decoded. // // TODO(mdempsky): Remove; unneeded since CL 391014. pkgPath string // elemData is the full data payload of the encoded package. // Elements are densely and contiguously packed together. // // The last 8 bytes of elemData are the package fingerprint. elemData string // elemEnds stores the byte-offset end positions of element // bitstreams within elemData. // // For example, element I's bitstream data starts at elemEnds[I-1] // (or 0, if I==0) and ends at elemEnds[I]. // // Note: elemEnds is indexed by absolute indices, not // section-relative indices. elemEnds []uint32 // elemEndsEnds stores the index-offset end positions of relocation // sections within elemEnds. // // For example, section K's end positions start at elemEndsEnds[K-1] // (or 0, if K==0) and end at elemEndsEnds[K]. elemEndsEnds [numRelocs]uint32 scratchRelocEnt []RelocEnt } // PkgPath returns the package path for the package // // TODO(mdempsky): Remove; unneeded since CL 391014. func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } // SyncMarkers reports whether pr uses sync markers. func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. // // TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, } // TODO(mdempsky): Implement direct indexing of input string to // avoid copying the position information. r := strings.NewReader(input) assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) switch pr.version { default: panic(fmt.Errorf("unsupported version: %v", pr.version)) case 0: // no flags case 1: var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 } assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) pos, err := r.Seek(0, io.SeekCurrent) assert(err == nil) pr.elemData = input[pos:] assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } // NumElems returns the number of elements in section k. func (pr *PkgDecoder) NumElems(k RelocKind) int { count := int(pr.elemEndsEnds[k]) if k > 0 { count -= int(pr.elemEndsEnds[k-1]) } return count } // TotalElems returns the total number of elements across all sections. func (pr *PkgDecoder) TotalElems() int { return len(pr.elemEnds) } // Fingerprint returns the package fingerprint. func (pr *PkgDecoder) Fingerprint() [8]byte { var fp [8]byte copy(fp[:], pr.elemData[len(pr.elemData)-8:]) return fp } // AbsIdx returns the absolute index for the given (section, index) // pair. 
func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx := int(idx) if k > 0 { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } // DataIdx returns the raw element bitstream for the given (section, // index) pair. func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { absIdx := pr.AbsIdx(k, idx) var start uint32 if absIdx > 0 { start = pr.elemEnds[absIdx-1] } end := pr.elemEnds[absIdx] return pr.elemData[start:end] } // StringIdx returns the string value for the given string index. func (pr *PkgDecoder) StringIdx(idx Index) string { return pr.DataIdx(RelocString, idx) } // NewDecoder returns a Decoder for the given (section, index) pair, // and decodes the given SyncMarker from the element bitstream. func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { r := pr.NewDecoderRaw(k, idx) r.Sync(marker) return r } // TempDecoder returns a Decoder for the given (section, index) pair, // and decodes the given SyncMarker from the element bitstream. // If possible the Decoder should be RetireDecoder'd when it is no longer // needed, this will avoid heap allocations. func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { r := pr.TempDecoderRaw(k, idx) r.Sync(marker) return r } func (pr *PkgDecoder) RetireDecoder(d *Decoder) { pr.scratchRelocEnt = d.Relocs d.Relocs = nil } // NewDecoderRaw returns a Decoder for the given (section, index) pair. // // Most callers should use NewDecoder instead. func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { r := Decoder{ common: pr, k: k, Idx: idx, } r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { r.Sync(SyncReloc) r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} } return r } func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { r := Decoder{ common: pr, k: k, Idx: idx, } r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) l := r.Len() if cap(pr.scratchRelocEnt) >= l { r.Relocs = pr.scratchRelocEnt[:l] pr.scratchRelocEnt = nil } else { r.Relocs = make([]RelocEnt, l) } for i := range r.Relocs { r.Sync(SyncReloc) r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} } return r } // A Decoder provides methods for decoding an individual element's // bitstream data. type Decoder struct { common *PkgDecoder Relocs []RelocEnt Data strings.Reader k RelocKind Idx Index } func (r *Decoder) checkErr(err error) { if err != nil { errorf("unexpected decoding error: %w", err) } } func (r *Decoder) rawUvarint() uint64 { x, err := readUvarint(&r.Data) r.checkErr(err) return x } // readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. // This avoids the interface conversion and thus has better escape properties, // which flows up the stack. func readUvarint(r *strings.Reader) (uint64, error) { var x uint64 var s uint for i := 0; i < binary.MaxVarintLen64; i++ { b, err := r.ReadByte() if err != nil { if i > 0 && err == io.EOF { err = io.ErrUnexpectedEOF } return x, err } if b < 0x80 { if i == binary.MaxVarintLen64-1 && b > 1 { return x, overflow } return x | uint64(b)<<s, nil } x |= uint64(b&0x7f) << s s += 7 } return x, overflow } var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer") func (r *Decoder) rawVarint() int64 { ux := r.rawUvarint() // Zig-zag decode. 
x := int64(ux >> 1) if ux&1 != 0 { x = ^x } return x } func (r *Decoder) rawReloc(k RelocKind, idx int) Index { e := r.Relocs[idx] assert(e.Kind == k) return e.Idx } // Sync decodes a sync marker from the element bitstream and asserts // that it matches the expected marker. // // If EnableSync is false, then Sync is a no-op. func (r *Decoder) Sync(mWant SyncMarker) { if !r.common.sync { return } pos, _ := r.Data.Seek(0, io.SeekCurrent) mHave := SyncMarker(r.rawUvarint()) writerPCs := make([]int, r.rawUvarint()) for i := range writerPCs { writerPCs[i] = int(r.rawUvarint()) } if mHave == mWant { return } // There's some tension here between printing: // // (1) full file paths that tools can recognize (e.g., so emacs // hyperlinks the "file:line" text for easy navigation), or // // (2) short file paths that are easier for humans to read (e.g., by // omitting redundant or irrelevant details, so it's easier to // focus on the useful bits that remain). // // The current formatting favors the former, as it seems more // helpful in practice. But perhaps the formatting could be improved // to better address both concerns. For example, use relative file // paths if they would be shorter, or rewrite file paths to contain // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how // to reliably expand that again. fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) fmt.Printf("\nfound %v, written at:\n", mHave) if len(writerPCs) == 0 { fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) } for _, pc := range writerPCs { fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) } fmt.Printf("\nexpected %v, reading at:\n", mWant) var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? n := runtime.Callers(2, readerPCs[:]) for _, pc := range fmtFrames(readerPCs[:n]...) { fmt.Printf("\t%s\n", pc) } // We already printed a stack trace for the reader, so now we can // simply exit. Printing a second one with panic or base.Fatalf // would just be noise. os.Exit(1) } // Bool decodes and returns a bool value from the element bitstream. func (r *Decoder) Bool() bool { r.Sync(SyncBool) x, err := r.Data.ReadByte() r.checkErr(err) assert(x < 2) return x != 0 } // Int64 decodes and returns an int64 value from the element bitstream. func (r *Decoder) Int64() int64 { r.Sync(SyncInt64) return r.rawVarint() } // Int64 decodes and returns a uint64 value from the element bitstream. func (r *Decoder) Uint64() uint64 { r.Sync(SyncUint64) return r.rawUvarint() } // Len decodes and returns a non-negative int value from the element bitstream. func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } // Int decodes and returns an int value from the element bitstream. func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } // Uint decodes and returns a uint value from the element bitstream. func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } // Code decodes a Code value from the element bitstream and returns // its ordinal value. It's the caller's responsibility to convert the // result to an appropriate Code type. // // TODO(mdempsky): Ideally this method would have signature "Code[T // Code] T" instead, but we don't allow generic methods and the // compiler can't depend on generics yet anyway. 
func (r *Decoder) Code(mark SyncMarker) int { r.Sync(mark) return r.Len() } // Reloc decodes a relocation of expected section k from the element // bitstream and returns an index to the referenced element. func (r *Decoder) Reloc(k RelocKind) Index { r.Sync(SyncUseReloc) return r.rawReloc(k, r.Len()) } // String decodes and returns a string value from the element // bitstream. func (r *Decoder) String() string { r.Sync(SyncString) return r.common.StringIdx(r.Reloc(RelocString)) } // Strings decodes and returns a variable-length slice of strings from // the element bitstream. func (r *Decoder) Strings() []string { res := make([]string, r.Len()) for i := range res { res[i] = r.String() } return res } // Value decodes and returns a constant.Value from the element // bitstream. func (r *Decoder) Value() constant.Value { r.Sync(SyncValue) isComplex := r.Bool() val := r.scalar() if isComplex { val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) } return val } func (r *Decoder) scalar() constant.Value { switch tag := CodeVal(r.Code(SyncVal)); tag { default: panic(fmt.Errorf("unexpected scalar tag: %v", tag)) case ValBool: return constant.MakeBool(r.Bool()) case ValString: return constant.MakeString(r.String()) case ValInt64: return constant.MakeInt64(r.Int64()) case ValBigInt: return constant.Make(r.bigInt()) case ValBigRat: num := r.bigInt() denom := r.bigInt() return constant.Make(new(big.Rat).SetFrac(num, denom)) case ValBigFloat: return constant.Make(r.bigFloat()) } } func (r *Decoder) bigInt() *big.Int { v := new(big.Int).SetBytes([]byte(r.String())) if r.Bool() { v.Neg(v) } return v } func (r *Decoder) bigFloat() *big.Float { v := new(big.Float).SetPrec(512) assert(v.UnmarshalText([]byte(r.String())) == nil) return v } // @@@ Helpers // TODO(mdempsky): These should probably be removed. I think they're a // smell that the export data format is not yet quite right. // PeekPkgPath returns the package path for the specified package // index. func (pr *PkgDecoder) PeekPkgPath(idx Index) string { var path string { r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) path = r.String() pr.RetireDecoder(&r) } if path == "" { path = pr.pkgPath } return path } // PeekObj returns the package path, object name, and CodeObj for the // specified object index. func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { var ridx Index var name string var rcode int { r := pr.TempDecoder(RelocName, idx, SyncObject1) r.Sync(SyncSym) r.Sync(SyncPkg) ridx = r.Reloc(RelocPkg) name = r.String() rcode = r.Code(SyncCodeObj) pr.RetireDecoder(&r) } path := pr.PeekPkgPath(ridx) assert(name != "") tag := CodeObj(rcode) return path, name, tag }
go/src/internal/pkgbits/decoder.go/0
{ "file_path": "go/src/internal/pkgbits/decoder.go", "repo_id": "go", "token_count": 5137 }
285
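Editorial aside, not part of decoder.go above: the zig-zag trick at the top of that file (shift out the low bit, complement when it is set) is the inverse of the usual signed-varint encoding. A minimal self-contained sketch, with hypothetical helper names:

package main

import "fmt"

// zigzagEncode maps signed values onto unsigned ones so that small
// magnitudes of either sign become small varints: 0,-1,1,-2,2 -> 0,1,2,3,4.
func zigzagEncode(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

// zigzagDecode inverts zigzagEncode; it is the same ux>>1 / ^x logic
// that rawVarint applies after reading the raw uvarint.
func zigzagDecode(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, v := range []int64{0, -1, 1, -64, 64} {
		fmt.Println(v, "->", zigzagEncode(v), "->", zigzagDecode(zigzagEncode(v)))
	}
}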
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package poll_test import ( "fmt" "io/fs" "net" "os" "testing" "time" ) func TestReadError(t *testing.T) { t.Run("ErrNotPollable", func(t *testing.T) { f, err := badStateFile() if err != nil { t.Skip(err) } defer f.Close() // Give scheduler a chance to have two separated // goroutines: an event poller and an event waiter. time.Sleep(100 * time.Millisecond) var b [1]byte _, err = f.Read(b[:]) if perr := parseReadError(err, isBadStateFileError); perr != nil { t.Fatal(perr) } }) } func parseReadError(nestedErr error, verify func(error) (string, bool)) error { err := nestedErr if nerr, ok := err.(*net.OpError); ok { err = nerr.Err } if nerr, ok := err.(*fs.PathError); ok { err = nerr.Err } if nerr, ok := err.(*os.SyscallError); ok { err = nerr.Err } if s, ok := verify(err); !ok { return fmt.Errorf("got %v; want %s", nestedErr, s) } return nil }
go/src/internal/poll/error_test.go/0
{ "file_path": "go/src/internal/poll/error_test.go", "repo_id": "go", "token_count": 448 }
286
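Editorial aside, not part of the test above: parseReadError peels the wrapper types by hand; since *net.OpError, *fs.PathError, and *os.SyscallError all implement Unwrap, the same search can often be written with errors.As. A hedged sketch (the helper name and the constructed error chain are purely illustrative):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"net"
	"os"
	"syscall"
)

// innermostErrno walks the Unwrap chain and reports the first
// syscall.Errno it finds, if any.
func innermostErrno(err error) (syscall.Errno, bool) {
	var errno syscall.Errno
	if errors.As(err, &errno) {
		return errno, true
	}
	return 0, false
}

func main() {
	err := &net.OpError{
		Op: "read",
		Err: &fs.PathError{
			Op:   "read",
			Path: "/dev/null",
			Err:  &os.SyscallError{Syscall: "read", Err: syscall.EINVAL},
		},
	}
	if errno, ok := innermostErrno(err); ok {
		fmt.Println("errno:", errno) // errno: invalid argument
	}
}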
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build unix || (js && wasm) || wasip1 || windows package poll import ( "io" "syscall" ) // eofError returns io.EOF when fd is available for reading end of // file. func (fd *FD) eofError(n int, err error) error { if n == 0 && err == nil && fd.ZeroReadIsEOF { return io.EOF } return err } // Shutdown wraps syscall.Shutdown. func (fd *FD) Shutdown(how int) error { if err := fd.incref(); err != nil { return err } defer fd.decref() return syscall.Shutdown(fd.Sysfd, how) } // Fchown wraps syscall.Fchown. func (fd *FD) Fchown(uid, gid int) error { if err := fd.incref(); err != nil { return err } defer fd.decref() return ignoringEINTR(func() error { return syscall.Fchown(fd.Sysfd, uid, gid) }) } // Ftruncate wraps syscall.Ftruncate. func (fd *FD) Ftruncate(size int64) error { if err := fd.incref(); err != nil { return err } defer fd.decref() return ignoringEINTR(func() error { return syscall.Ftruncate(fd.Sysfd, size) }) } // RawControl invokes the user-defined function f for a non-IO // operation. func (fd *FD) RawControl(f func(uintptr)) error { if err := fd.incref(); err != nil { return err } defer fd.decref() f(uintptr(fd.Sysfd)) return nil } // ignoringEINTR makes a function call and repeats it if it returns // an EINTR error. This appears to be required even though we install all // signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846. // Also #20400 and #36644 are issues in which a signal handler is // installed without setting SA_RESTART. None of these are the common case, // but there are enough of them that it seems that we can't avoid // an EINTR loop. func ignoringEINTR(fn func() error) error { for { err := fn() if err != syscall.EINTR { return err } } }
go/src/internal/poll/fd_posix.go/0
{ "file_path": "go/src/internal/poll/fd_posix.go", "repo_id": "go", "token_count": 721 }
287
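Editorial aside, not part of fd_posix.go above: the same EINTR-retry idea generalized to calls that also return a value. The generic helper name here is hypothetical, purely to illustrate the pattern:

package main

import (
	"fmt"
	"syscall"
)

// retryEINTR keeps calling fn until it stops failing with EINTR,
// mirroring the error-only loop in fd_posix.go but for calls that
// also produce a result.
func retryEINTR[T any](fn func() (T, error)) (T, error) {
	for {
		v, err := fn()
		if err != syscall.EINTR {
			return v, err
		}
	}
}

func main() {
	n, err := retryEINTR(func() (int, error) {
		// Stand-in for an interruptible system call.
		return 42, nil
	})
	fmt.Println(n, err)
}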
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build unix package poll import ( "io" "runtime" "syscall" ) // Writev wraps the writev system call. func (fd *FD) Writev(v *[][]byte) (int64, error) { if err := fd.writeLock(); err != nil { return 0, err } defer fd.writeUnlock() if err := fd.pd.prepareWrite(fd.isFile); err != nil { return 0, err } var iovecs []syscall.Iovec if fd.iovecs != nil { iovecs = *fd.iovecs } // TODO: read from sysconf(_SC_IOV_MAX)? The Linux default is // 1024 and this seems conservative enough for now. Darwin's // UIO_MAXIOV also seems to be 1024. maxVec := 1024 if runtime.GOOS == "aix" || runtime.GOOS == "solaris" { // IOV_MAX is set to XOPEN_IOV_MAX on AIX and Solaris. maxVec = 16 } var n int64 var err error for len(*v) > 0 { iovecs = iovecs[:0] for _, chunk := range *v { if len(chunk) == 0 { continue } iovecs = append(iovecs, newIovecWithBase(&chunk[0])) if fd.IsStream && len(chunk) > 1<<30 { iovecs[len(iovecs)-1].SetLen(1 << 30) break // continue chunk on next writev } iovecs[len(iovecs)-1].SetLen(len(chunk)) if len(iovecs) == maxVec { break } } if len(iovecs) == 0 { break } if fd.iovecs == nil { fd.iovecs = new([]syscall.Iovec) } *fd.iovecs = iovecs // cache var wrote uintptr wrote, err = writev(fd.Sysfd, iovecs) if wrote == ^uintptr(0) { wrote = 0 } TestHookDidWritev(int(wrote)) n += int64(wrote) consume(v, int64(wrote)) clear(iovecs) if err != nil { if err == syscall.EINTR { continue } if err == syscall.EAGAIN { if err = fd.pd.waitWrite(fd.isFile); err == nil { continue } } break } if n == 0 { err = io.ErrUnexpectedEOF break } } return n, err }
go/src/internal/poll/writev.go/0
{ "file_path": "go/src/internal/poll/writev.go", "repo_id": "go", "token_count": 868 }
288
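Editorial aside: writev.go above calls a consume helper that is defined elsewhere in internal/poll; roughly, it advances the vector past the bytes already written. An illustrative stand-in, not the package's actual code:

package main

import "fmt"

// consume drops n written bytes from the front of the vector: whole
// chunks are removed first, then the next chunk is sliced part-way.
func consume(v *[][]byte, n int64) {
	for len(*v) > 0 {
		ln0 := int64(len((*v)[0]))
		if ln0 > n {
			(*v)[0] = (*v)[0][n:]
			return
		}
		n -= ln0
		(*v)[0] = nil
		*v = (*v)[1:]
	}
}

func main() {
	v := [][]byte{[]byte("hello"), []byte("world")}
	consume(&v, 7)        // 5 bytes of "hello" plus 2 bytes of "world"
	fmt.Printf("%q\n", v) // ["rld"]
}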
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package reflectlite import ( "unsafe" ) // Field returns the i'th field of the struct v. // It panics if v's Kind is not Struct or i is out of range. func Field(v Value, i int) Value { if v.kind() != Struct { panic(&ValueError{"reflect.Value.Field", v.kind()}) } tt := (*structType)(unsafe.Pointer(v.typ())) if uint(i) >= uint(len(tt.Fields)) { panic("reflect: Field index out of range") } field := &tt.Fields[i] typ := field.Typ // Inherit permission bits from v, but clear flagEmbedRO. fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind()) // Using an unexported field forces flagRO. if !field.Name.IsExported() { if field.Embedded() { fl |= flagEmbedRO } else { fl |= flagStickyRO } } // Either flagIndir is set and v.ptr points at struct, // or flagIndir is not set and v.ptr is the actual struct data. // In the former case, we want v.ptr + offset. // In the latter case, we must have field.offset = 0, // so v.ptr + field.offset is still the correct address. ptr := add(v.ptr, field.Offset, "same as non-reflect &v.field") return Value{typ, ptr, fl} } func TField(typ Type, i int) Type { t := typ.(rtype) if t.Kind() != Struct { panic("reflect: Field of non-struct type") } tt := (*structType)(unsafe.Pointer(t.Type)) return StructFieldType(tt, i) } // Field returns the i'th struct field. func StructFieldType(t *structType, i int) Type { if i < 0 || i >= len(t.Fields) { panic("reflect: Field index out of bounds") } p := &t.Fields[i] return toType(p.Typ) } // Zero returns a Value representing the zero value for the specified type. // The result is different from the zero value of the Value struct, // which represents no value at all. // For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0. // The returned value is neither addressable nor settable. func Zero(typ Type) Value { if typ == nil { panic("reflect: Zero(nil)") } t := typ.common() fl := flag(t.Kind()) if t.IfaceIndir() { return Value{t, unsafe_New(t), fl | flagIndir} } return Value{t, nil, fl} } // ToInterface returns v's current value as an interface{}. // It is equivalent to: // // var i interface{} = (v's underlying value) // // It panics if the Value was obtained by accessing // unexported struct fields. func ToInterface(v Value) (i any) { return valueInterface(v) } type EmbedWithUnexpMeth struct{} func (EmbedWithUnexpMeth) f() {} type pinUnexpMeth interface { f() } var pinUnexpMethI = pinUnexpMeth(EmbedWithUnexpMeth{}) func FirstMethodNameBytes(t Type) *byte { _ = pinUnexpMethI ut := t.uncommon() if ut == nil { panic("type has no methods") } m := ut.Methods()[0] mname := t.(rtype).nameOff(m.Name) if *mname.DataChecked(0, "name flag field")&(1<<2) == 0 { panic("method name does not have pkgPath *string") } return mname.Bytes } type Buffer struct { buf []byte }
go/src/internal/reflectlite/export_test.go/0
{ "file_path": "go/src/internal/reflectlite/export_test.go", "repo_id": "go", "token_count": 1093 }
289
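Editorial aside: the Zero helper documented above mirrors the exported reflect.Zero; a tiny sketch of the same semantics with the public API:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.Zero returns a non-addressable, non-settable Value holding
	// the zero value of the given type, as described for reflectlite's Zero.
	z := reflect.Zero(reflect.TypeOf(42))
	fmt.Println(z.Kind(), z.Int(), z.CanAddr(), z.CanSet()) // int 0 false false
}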
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "go_asm.h" #include "textflag.h" TEXT ·Casint32(SB), NOSPLIT, $0-17 B ·Cas(SB) TEXT ·Casint64(SB), NOSPLIT, $0-25 B ·Cas64(SB) TEXT ·Casuintptr(SB), NOSPLIT, $0-25 B ·Cas64(SB) TEXT ·CasRel(SB), NOSPLIT, $0-17 B ·Cas(SB) TEXT ·Loadint32(SB), NOSPLIT, $0-12 B ·Load(SB) TEXT ·Loadint64(SB), NOSPLIT, $0-16 B ·Load64(SB) TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 B ·Load64(SB) TEXT ·Loaduint(SB), NOSPLIT, $0-16 B ·Load64(SB) TEXT ·Storeint32(SB), NOSPLIT, $0-12 B ·Store(SB) TEXT ·Storeint64(SB), NOSPLIT, $0-16 B ·Store64(SB) TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 B ·Store64(SB) TEXT ·Xaddint32(SB), NOSPLIT, $0-20 B ·Xadd(SB) TEXT ·Xaddint64(SB), NOSPLIT, $0-24 B ·Xadd64(SB) TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 B ·Xadd64(SB) TEXT ·Casp1(SB), NOSPLIT, $0-25 B ·Cas64(SB) // uint32 ·Load(uint32 volatile* addr) TEXT ·Load(SB),NOSPLIT,$0-12 MOVD ptr+0(FP), R0 LDARW (R0), R0 MOVW R0, ret+8(FP) RET // uint8 ·Load8(uint8 volatile* addr) TEXT ·Load8(SB),NOSPLIT,$0-9 MOVD ptr+0(FP), R0 LDARB (R0), R0 MOVB R0, ret+8(FP) RET // uint64 ·Load64(uint64 volatile* addr) TEXT ·Load64(SB),NOSPLIT,$0-16 MOVD ptr+0(FP), R0 LDAR (R0), R0 MOVD R0, ret+8(FP) RET // void *·Loadp(void *volatile *addr) TEXT ·Loadp(SB),NOSPLIT,$0-16 MOVD ptr+0(FP), R0 LDAR (R0), R0 MOVD R0, ret+8(FP) RET // uint32 ·LoadAcq(uint32 volatile* addr) TEXT ·LoadAcq(SB),NOSPLIT,$0-12 B ·Load(SB) // uint64 ·LoadAcquintptr(uint64 volatile* addr) TEXT ·LoadAcq64(SB),NOSPLIT,$0-16 B ·Load64(SB) // uintptr ·LoadAcq64(uintptr volatile* addr) TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16 B ·Load64(SB) TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 B ·Store64(SB) TEXT ·StoreRel(SB), NOSPLIT, $0-12 B ·Store(SB) TEXT ·StoreRel64(SB), NOSPLIT, $0-16 B ·Store64(SB) TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 B ·Store64(SB) TEXT ·Store(SB), NOSPLIT, $0-12 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 STLRW R1, (R0) RET TEXT ·Store8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R0 MOVB val+8(FP), R1 STLRB R1, (R0) RET TEXT ·Store64(SB), NOSPLIT, $0-16 MOVD ptr+0(FP), R0 MOVD val+8(FP), R1 STLR R1, (R0) RET // uint32 Xchg(ptr *uint32, new uint32) // Atomically: // old := *ptr; // *ptr = new; // return old; TEXT ·Xchg(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R0 MOVW new+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif SWPALW R1, (R0), R2 MOVW R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 STLXRW R1, (R0), R3 CBNZ R3, load_store_loop MOVW R2, ret+16(FP) RET #endif // uint64 Xchg64(ptr *uint64, new uint64) // Atomically: // old := *ptr; // *ptr = new; // return old; TEXT ·Xchg64(SB), NOSPLIT, $0-24 MOVD ptr+0(FP), R0 MOVD new+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif SWPALD R1, (R0), R2 MOVD R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXR (R0), R2 STLXR R1, (R0), R3 CBNZ R3, load_store_loop MOVD R2, ret+16(FP) RET #endif // bool Cas(uint32 *ptr, uint32 old, uint32 new) // Atomically: // if(*val == old){ // *val = new; // return 1; // } else // return 0; TEXT ·Cas(SB), NOSPLIT, $0-17 MOVD ptr+0(FP), R0 MOVW old+8(FP), R1 MOVW new+12(FP), R2 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MOVD R1, R3 CASALW R3, (R0), R2 CMP R1, R3 CSET EQ, R0 MOVB R0, ret+16(FP) RET #ifndef GOARM64_LSE 
load_store_loop: LDAXRW (R0), R3 CMPW R1, R3 BNE ok STLXRW R2, (R0), R3 CBNZ R3, load_store_loop ok: CSET EQ, R0 MOVB R0, ret+16(FP) RET #endif // bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) // Atomically: // if(*val == old){ // *val = new; // return 1; // } else { // return 0; // } TEXT ·Cas64(SB), NOSPLIT, $0-25 MOVD ptr+0(FP), R0 MOVD old+8(FP), R1 MOVD new+16(FP), R2 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MOVD R1, R3 CASALD R3, (R0), R2 CMP R1, R3 CSET EQ, R0 MOVB R0, ret+24(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXR (R0), R3 CMP R1, R3 BNE ok STLXR R2, (R0), R3 CBNZ R3, load_store_loop ok: CSET EQ, R0 MOVB R0, ret+24(FP) RET #endif // uint32 xadd(uint32 volatile *ptr, int32 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R0 MOVW delta+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDADDALW R1, (R0), R2 ADD R1, R2 MOVW R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 ADDW R2, R1, R2 STLXRW R2, (R0), R3 CBNZ R3, load_store_loop MOVW R2, ret+16(FP) RET #endif // uint64 Xadd64(uint64 volatile *ptr, int64 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd64(SB), NOSPLIT, $0-24 MOVD ptr+0(FP), R0 MOVD delta+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDADDALD R1, (R0), R2 ADD R1, R2 MOVD R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXR (R0), R2 ADD R2, R1, R2 STLXR R2, (R0), R3 CBNZ R3, load_store_loop MOVD R2, ret+16(FP) RET #endif TEXT ·Xchgint32(SB), NOSPLIT, $0-20 B ·Xchg(SB) TEXT ·Xchgint64(SB), NOSPLIT, $0-24 B ·Xchg64(SB) TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 B ·Xchg64(SB) TEXT ·And8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R0 MOVB val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MVN R1, R2 LDCLRALB R2, (R0), R3 RET #ifndef GOARM64_LSE load_store_loop: LDAXRB (R0), R2 AND R1, R2 STLXRB R2, (R0), R3 CBNZ R3, load_store_loop RET #endif TEXT ·Or8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R0 MOVB val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDORALB R1, (R0), R2 RET #ifndef GOARM64_LSE load_store_loop: LDAXRB (R0), R2 ORR R1, R2 STLXRB R2, (R0), R3 CBNZ R3, load_store_loop RET #endif // func And(addr *uint32, v uint32) TEXT ·And(SB), NOSPLIT, $0-12 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MVN R1, R2 LDCLRALW R2, (R0), R3 RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 AND R1, R2 STLXRW R2, (R0), R3 CBNZ R3, load_store_loop RET #endif // func Or(addr *uint32, v uint32) TEXT ·Or(SB), NOSPLIT, $0-12 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDORALW R1, (R0), R2 RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 ORR R1, R2 STLXRW R2, (R0), R3 CBNZ R3, load_store_loop RET #endif // func Or32(addr *uint32, v uint32) old uint32 TEXT ·Or32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDORALW R1, (R0), R2 MOVD R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: 
LDAXRW (R0), R2 ORR R1, R2, R3 STLXRW R3, (R0), R4 CBNZ R4, load_store_loop MOVD R2, ret+16(FP) RET #endif // func And32(addr *uint32, v uint32) old uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MVN R1, R2 LDCLRALW R2, (R0), R3 MOVD R3, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXRW (R0), R2 AND R1, R2, R3 STLXRW R3, (R0), R4 CBNZ R4, load_store_loop MOVD R2, ret+16(FP) RET #endif // func Or64(addr *uint64, v uint64) old uint64 TEXT ·Or64(SB), NOSPLIT, $0-24 MOVD ptr+0(FP), R0 MOVD val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif LDORALD R1, (R0), R2 MOVD R2, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXR (R0), R2 ORR R1, R2, R3 STLXR R3, (R0), R4 CBNZ R4, load_store_loop MOVD R2, ret+16(FP) RET #endif // func And64(addr *uint64, v uint64) old uint64 TEXT ·And64(SB), NOSPLIT, $0-24 MOVD ptr+0(FP), R0 MOVD val+8(FP), R1 #ifndef GOARM64_LSE MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 CBZ R4, load_store_loop #endif MVN R1, R2 LDCLRALD R2, (R0), R3 MOVD R3, ret+16(FP) RET #ifndef GOARM64_LSE load_store_loop: LDAXR (R0), R2 AND R1, R2, R3 STLXR R3, (R0), R4 CBNZ R4, load_store_loop MOVD R2, ret+16(FP) RET #endif // func Anduintptr(addr *uintptr, v uintptr) old uintptr TEXT ·Anduintptr(SB), NOSPLIT, $0-24 B ·And64(SB) // func Oruintptr(addr *uintptr, v uintptr) old uintptr TEXT ·Oruintptr(SB), NOSPLIT, $0-24 B ·Or64(SB)
go/src/internal/runtime/atomic/atomic_arm64.s/0
{ "file_path": "go/src/internal/runtime/atomic/atomic_arm64.s", "repo_id": "go", "token_count": 5143 }
290
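Editorial aside: the LDAXR/STLXR fallback loops above implement the standard compare-and-swap contract. Outside the runtime the same pattern is usually written against sync/atomic; a minimal sketch of a CAS-based add (the helper name is hypothetical):

package main

import (
	"fmt"
	"sync/atomic"
)

// casAdd adds delta to *p with a compare-and-swap loop and returns the
// new value, the contract the assembly Xadd routines provide natively.
func casAdd(p *uint32, delta uint32) uint32 {
	for {
		old := atomic.LoadUint32(p)
		if atomic.CompareAndSwapUint32(p, old, old+delta) {
			return old + delta
		}
	}
}

func main() {
	var x uint32
	fmt.Println(casAdd(&x, 5), casAdd(&x, 3)) // 5 8
}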
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package atomic_test import ( "internal/runtime/atomic" "testing" ) var sink any func BenchmarkAtomicLoad64(b *testing.B) { var x uint64 sink = &x for i := 0; i < b.N; i++ { _ = atomic.Load64(&x) } } func BenchmarkAtomicStore64(b *testing.B) { var x uint64 sink = &x for i := 0; i < b.N; i++ { atomic.Store64(&x, 0) } } func BenchmarkAtomicLoad(b *testing.B) { var x uint32 sink = &x for i := 0; i < b.N; i++ { _ = atomic.Load(&x) } } func BenchmarkAtomicStore(b *testing.B) { var x uint32 sink = &x for i := 0; i < b.N; i++ { atomic.Store(&x, 0) } } func BenchmarkAnd8(b *testing.B) { var x [512]uint8 // give byte its own cache line sink = &x for i := 0; i < b.N; i++ { atomic.And8(&x[255], uint8(i)) } } func BenchmarkAnd(b *testing.B) { var x [128]uint32 // give x its own cache line sink = &x for i := 0; i < b.N; i++ { atomic.And(&x[63], uint32(i)) } } func BenchmarkAnd8Parallel(b *testing.B) { var x [512]uint8 // give byte its own cache line sink = &x b.RunParallel(func(pb *testing.PB) { i := uint8(0) for pb.Next() { atomic.And8(&x[255], i) i++ } }) } func BenchmarkAndParallel(b *testing.B) { var x [128]uint32 // give x its own cache line sink = &x b.RunParallel(func(pb *testing.PB) { i := uint32(0) for pb.Next() { atomic.And(&x[63], i) i++ } }) } func BenchmarkOr8(b *testing.B) { var x [512]uint8 // give byte its own cache line sink = &x for i := 0; i < b.N; i++ { atomic.Or8(&x[255], uint8(i)) } } func BenchmarkOr(b *testing.B) { var x [128]uint32 // give x its own cache line sink = &x for i := 0; i < b.N; i++ { atomic.Or(&x[63], uint32(i)) } } func BenchmarkOr8Parallel(b *testing.B) { var x [512]uint8 // give byte its own cache line sink = &x b.RunParallel(func(pb *testing.PB) { i := uint8(0) for pb.Next() { atomic.Or8(&x[255], i) i++ } }) } func BenchmarkOrParallel(b *testing.B) { var x [128]uint32 // give x its own cache line sink = &x b.RunParallel(func(pb *testing.PB) { i := uint32(0) for pb.Next() { atomic.Or(&x[63], i) i++ } }) } func BenchmarkXadd(b *testing.B) { var x uint32 ptr := &x b.RunParallel(func(pb *testing.PB) { for pb.Next() { atomic.Xadd(ptr, 1) } }) } func BenchmarkXadd64(b *testing.B) { var x uint64 ptr := &x b.RunParallel(func(pb *testing.PB) { for pb.Next() { atomic.Xadd64(ptr, 1) } }) } func BenchmarkCas(b *testing.B) { var x uint32 x = 1 ptr := &x b.RunParallel(func(pb *testing.PB) { for pb.Next() { atomic.Cas(ptr, 1, 0) atomic.Cas(ptr, 0, 1) } }) } func BenchmarkCas64(b *testing.B) { var x uint64 x = 1 ptr := &x b.RunParallel(func(pb *testing.PB) { for pb.Next() { atomic.Cas64(ptr, 1, 0) atomic.Cas64(ptr, 0, 1) } }) } func BenchmarkXchg(b *testing.B) { var x uint32 x = 1 ptr := &x b.RunParallel(func(pb *testing.PB) { var y uint32 y = 1 for pb.Next() { y = atomic.Xchg(ptr, y) y += 1 } }) } func BenchmarkXchg64(b *testing.B) { var x uint64 x = 1 ptr := &x b.RunParallel(func(pb *testing.PB) { var y uint64 y = 1 for pb.Next() { y = atomic.Xchg64(ptr, y) y += 1 } }) }
go/src/internal/runtime/atomic/bench_test.go/0
{ "file_path": "go/src/internal/runtime/atomic/bench_test.go", "repo_id": "go", "token_count": 1601 }
291
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sys // NOTE: keep in sync with cmd/compile/internal/types.CalcSize // to make the compiler recognize this as an intrinsic type. type nih struct{} // NotInHeap is a type that must never be allocated from the GC'd heap or on the stack, // and is called not-in-heap. // // Other types can embed NotInHeap to make them not-in-heap. Specifically, pointers // to these types must always fail the `runtime.inheap` check. The type may be used // for global variables, or for objects in unmanaged memory (e.g., allocated with // `sysAlloc`, `persistentalloc`, `fixalloc`, or from a manually-managed span). // // Specifically: // // 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap // allocation of T are disallowed. (Though implicit allocations are // disallowed in the runtime anyway.) // // 2. A pointer to a regular type (other than `unsafe.Pointer`) cannot be // converted to a pointer to a not-in-heap type, even if they have the // same underlying type. // // 3. Any type containing a not-in-heap type is itself considered not-in-heap. // // - Structs and arrays are not-in-heap if their elements are not-in-heap. // - Maps and channels containing not-in-heap types are disallowed. // // 4. Write barriers on pointers to not-in-heap types can be omitted. // // The last point is the real benefit of NotInHeap. The runtime uses // it for low-level internal structures to avoid memory barriers in the // scheduler and the memory allocator where they are illegal or simply // inefficient. This mechanism is reasonably safe and does not compromise // the readability of the runtime. type NotInHeap struct{ _ nih }
go/src/internal/runtime/sys/nih.go/0
{ "file_path": "go/src/internal/runtime/sys/nih.go", "repo_id": "go", "token_count": 506 }
292
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package unix import ( "internal/abi" "unsafe" ) //go:cgo_import_dynamic libc_arc4random_buf arc4random_buf "/usr/lib/libSystem.B.dylib" func libc_arc4random_buf_trampoline() // ARC4Random calls the macOS arc4random_buf(3) function. func ARC4Random(p []byte) { // macOS 11 and 12 abort if length is 0. if len(p) == 0 { return } syscall_syscall(abi.FuncPCABI0(libc_arc4random_buf_trampoline), uintptr(unsafe.Pointer(unsafe.SliceData(p))), uintptr(len(p)), 0) }
go/src/internal/syscall/unix/arc4random_darwin.go/0
{ "file_path": "go/src/internal/syscall/unix/arc4random_darwin.go", "repo_id": "go", "token_count": 240 }
293
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package unix import "syscall" func PosixFallocate(fd int, off int64, size int64) error { // If successful, posix_fallocate() returns zero. It returns an error on failure, without // setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1 // // The padding 0 argument is needed because the ARM calling convention requires that if an // argument (off in this case) needs double-word alignment (8-byte), the NCRN (next core // register number) is rounded up to the next even register number. // See https://github.com/ARM-software/abi-aa/blob/2bcab1e3b22d55170c563c3c7940134089176746/aapcs32/aapcs32.rst#parameter-passing r1, _, _ := syscall.Syscall6(posixFallocateTrap, uintptr(fd), 0, uintptr(off), uintptr(off>>32), uintptr(size), uintptr(size>>32)) if r1 != 0 { return syscall.Errno(r1) } return nil }
go/src/internal/syscall/unix/fallocate_freebsd_arm.go/0
{ "file_path": "go/src/internal/syscall/unix/fallocate_freebsd_arm.go", "repo_id": "go", "token_count": 351 }
294
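Editorial aside: the comment above describes passing a 64-bit argument as two 32-bit register words (plus alignment padding) on the 32-bit ARM ABI. A tiny illustration of the low/high split itself, with a hypothetical helper name:

package main

import "fmt"

// splitUint64 returns the low and high 32-bit halves of v, matching how
// off and size are spread across uintptr arguments in the syscall above.
func splitUint64(v uint64) (lo, hi uint32) {
	return uint32(v), uint32(v >> 32)
}

func main() {
	lo, hi := splitUint64(0x1122334455667788)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x55667788 hi=0x11223344
}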
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build solaris package unix_test import ( "internal/syscall/unix" "runtime" "syscall" "testing" ) func TestSupportSockNonblockCloexec(t *testing.T) { // Test that SupportSockNonblockCloexec returns true if socket succeeds with SOCK_NONBLOCK and SOCK_CLOEXEC. s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) if err == nil { syscall.Close(s) } wantSock := err != syscall.EPROTONOSUPPORT && err != syscall.EINVAL gotSock := unix.SupportSockNonblockCloexec() if wantSock != gotSock { t.Fatalf("SupportSockNonblockCloexec, got %t; want %t", gotSock, wantSock) } // Test that SupportAccept4 returns true if accept4 is available. for { _, _, err = syscall.Accept4(0, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC) if err != syscall.EINTR { break } } wantAccept4 := err != syscall.ENOSYS gotAccept4 := unix.SupportAccept4() if wantAccept4 != gotAccept4 { t.Fatalf("SupportAccept4, got %t; want %t", gotAccept4, wantAccept4) } // Test that the version returned by KernelVersion matches expectations. major, minor := unix.KernelVersion() t.Logf("Kernel version: %d.%d", major, minor) if runtime.GOOS == "illumos" { if gotSock && gotAccept4 && (major < 5 || (major == 5 && minor < 11)) { t.Fatalf("SupportSockNonblockCloexec and SupportAccept4 are true, but kernel version is older than 5.11, SunOS version: %d.%d", major, minor) } if !gotSock && !gotAccept4 && (major > 5 || (major == 5 && minor >= 11)) { t.Errorf("SupportSockNonblockCloexec and SupportAccept4 are false, but kernel version is 5.11 or newer, SunOS version: %d.%d", major, minor) } } else { // Solaris if gotSock && gotAccept4 && (major < 11 || (major == 11 && minor < 4)) { t.Fatalf("SupportSockNonblockCloexec and SupportAccept4 are true, but kernel version is older than 11.4, Solaris version: %d.%d", major, minor) } if !gotSock && !gotAccept4 && (major > 11 || (major == 11 && minor >= 4)) { t.Errorf("SupportSockNonblockCloexec and SupportAccept4 are false, but kernel version is 11.4 or newer, Solaris version: %d.%d", major, minor) } } }
go/src/internal/syscall/unix/kernel_version_solaris_test.go/0
{ "file_path": "go/src/internal/syscall/unix/kernel_version_solaris_test.go", "repo_id": "go", "token_count": 863 }
295
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package unix_test import ( "internal/goarch" "internal/syscall/unix" "runtime" "strings" "testing" "unsafe" ) // TestSiginfoChildLayout validates SiginfoChild layout. Modelled after // static assertions in linux kernel's arch/*/kernel/signal*.c. func TestSiginfoChildLayout(t *testing.T) { var si unix.SiginfoChild const host64bit = goarch.PtrSize == 8 if v := unsafe.Sizeof(si); v != 128 { t.Fatalf("sizeof: got %d, want 128", v) } ofSigno := 0 ofErrno := 4 ofCode := 8 if strings.HasPrefix(runtime.GOARCH, "mips") { // These two fields are swapped on MIPS platforms. ofErrno, ofCode = ofCode, ofErrno } ofPid := 12 if host64bit { ofPid = 16 } ofUid := ofPid + 4 ofStatus := ofPid + 8 offsets := []struct { name string got uintptr want int }{ {"Signo", unsafe.Offsetof(si.Signo), ofSigno}, {"Errno", unsafe.Offsetof(si.Errno), ofErrno}, {"Code", unsafe.Offsetof(si.Code), ofCode}, {"Pid", unsafe.Offsetof(si.Pid), ofPid}, {"Uid", unsafe.Offsetof(si.Uid), ofUid}, {"Status", unsafe.Offsetof(si.Status), ofStatus}, } for _, tc := range offsets { if int(tc.got) != tc.want { t.Errorf("offsetof %s: got %d, want %d", tc.name, tc.got, tc.want) } } }
go/src/internal/syscall/unix/siginfo_linux_test.go/0
{ "file_path": "go/src/internal/syscall/unix/siginfo_linux_test.go", "repo_id": "go", "token_count": 554 }
296
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package windows type PROCESS_MEMORY_COUNTERS struct { CB uint32 PageFaultCount uint32 PeakWorkingSetSize uintptr WorkingSetSize uintptr QuotaPeakPagedPoolUsage uintptr QuotaPagedPoolUsage uintptr QuotaPeakNonPagedPoolUsage uintptr QuotaNonPagedPoolUsage uintptr PagefileUsage uintptr PeakPagefileUsage uintptr } //sys GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) = psapi.GetProcessMemoryInfo
go/src/internal/syscall/windows/psapi_windows.go/0
{ "file_path": "go/src/internal/syscall/windows/psapi_windows.go", "repo_id": "go", "token_count": 294 }
297
// Code generated by 'go generate'; DO NOT EDIT. package windows import ( "internal/syscall/windows/sysdll" "syscall" "unsafe" ) var _ unsafe.Pointer // Do the interface allocations only once for common // Errno values. const ( errnoERROR_IO_PENDING = 997 ) var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. func errnoErr(e syscall.Errno) error { switch e { case 0: return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } // TODO: add more here, after collecting data on the common // error values see on Windows. (perhaps when running // all.bat?) return e } var ( modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll")) modbcryptprimitives = syscall.NewLazyDLL(sysdll.Add("bcryptprimitives.dll")) modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll")) modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll")) modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll")) modntdll = syscall.NewLazyDLL(sysdll.Add("ntdll.dll")) modpsapi = syscall.NewLazyDLL(sysdll.Add("psapi.dll")) moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll")) modws2_32 = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll")) procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") procOpenServiceW = modadvapi32.NewProc("OpenServiceW") procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") procRevertToSelf = modadvapi32.NewProc("RevertToSelf") procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") procProcessPrng = modbcryptprimitives.NewProc("ProcessPrng") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procCreateEventW = modkernel32.NewProc("CreateEventW") procGetACP = modkernel32.NewProc("GetACP") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetTempPath2W = modkernel32.NewProc("GetTempPath2W") procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") procLockFileEx = modkernel32.NewProc("LockFileEx") procModule32FirstW = modkernel32.NewProc("Module32FirstW") procModule32NextW = modkernel32.NewProc("Module32NextW") procMoveFileExW = modkernel32.NewProc("MoveFileExW") procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry") procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind") procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") procVirtualQuery = modkernel32.NewProc("VirtualQuery") procNetShareAdd = modnetapi32.NewProc("NetShareAdd") procNetShareDel = modnetapi32.NewProc("NetShareDel") 
procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSASocketW = modws2_32.NewProc("WSASocketW") ) func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) { var _p0 uint32 if disableAllPrivileges { _p0 = 1 } r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) ret = uint32(r0) if true { err = errnoErr(e1) } return } func DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) { r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(hExistingToken), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpTokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(phNewToken))) if r1 == 0 { err = errnoErr(e1) } return } func ImpersonateSelf(impersonationlevel uint32) (err error) { r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } return } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = syscall.Handle(r0) if handle == 0 { err = errnoErr(e1) } return } func OpenService(mgr syscall.Handle, serviceName *uint16, access uint32) (handle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = syscall.Handle(r0) if handle == 0 { err = errnoErr(e1) } return } func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) { var _p0 uint32 if openasself { _p0 = 1 } r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(h), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func QueryServiceStatus(hService syscall.Handle, lpServiceStatus *SERVICE_STATUS) (err error) { r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(hService), uintptr(unsafe.Pointer(lpServiceStatus)), 0) if r1 == 0 { err = errnoErr(e1) } return } func RevertToSelf() (err error) { r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation 
uintptr, tokenInformationLength uint32) (err error) { r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func ProcessPrng(buf []byte) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] } r1, _, e1 := syscall.Syscall(procProcessPrng.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0) if r1 == 0 { err = errnoErr(e1) } return } func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } return } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) handle = syscall.Handle(r0) if handle == 0 { err = errnoErr(e1) } return } func GetACP() (acp uint32) { r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) acp = uint32(r0) return } func GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nameformat), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } return } func GetConsoleCP() (ccp uint32) { r0, _, _ := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) ccp = uint32(r0) return } func GetCurrentThread() (pseudoHandle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) pseudoHandle = syscall.Handle(r0) if pseudoHandle == 0 { err = errnoErr(e1) } return } func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) { r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) n = uint32(r0) if n == 0 { err = errnoErr(e1) } return } func GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len)) n = uint32(r0) if n == 0 { err = errnoErr(e1) } return } func GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetTempPath2W.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) n = uint32(r0) if n == 0 { err = errnoErr(e1) } return } func GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { r1, _, e1 := 
syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { err = errnoErr(e1) } return } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } return } func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } return } func Module32First(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) { r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) if r1 == 0 { err = errnoErr(e1) } return } func Module32Next(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) { r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) if r1 == 0 { err = errnoErr(e1) } return } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } return } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) } return } func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) { r0, _, _ := syscall.Syscall(procRtlLookupFunctionEntry.Addr(), 3, uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(unsafe.Pointer(table))) ret = uintptr(r0) return } func RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) { r0, _, _ := syscall.Syscall9(procRtlVirtualUnwind.Addr(), 8, uintptr(handlerType), uintptr(baseAddress), uintptr(pc), uintptr(entry), uintptr(ctxt), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(frame)), uintptr(unsafe.Pointer(ctxptrs)), 0) ret = uintptr(r0) return } func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(fileInformationClass), uintptr(buf), uintptr(bufsize), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), 
uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { err = errnoErr(e1) } return } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } return } func NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) { r0, _, _ := syscall.Syscall6(procNetShareAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0) if r0 != 0 { neterr = syscall.Errno(r0) } return } func NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) { r0, _, _ := syscall.Syscall(procNetShareDel.Addr(), 3, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(netName)), uintptr(reserved)) if r0 != 0 { neterr = syscall.Errno(r0) } return } func NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) { r0, _, _ := syscall.Syscall9(procNetUserGetLocalGroups.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(flags), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), 0) if r0 != 0 { neterr = syscall.Errno(r0) } return } func rtlGetVersion(info *_OSVERSIONINFOW) { syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) return } func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(memCounters)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } return } func CreateEnvironmentBlock(block **uint16, token syscall.Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { _p0 = 1 } r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } return } func DestroyEnvironmentBlock(block *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) if r1 == 0 { err = errnoErr(e1) } return } func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetProfilesDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)), 0) if r1 == 0 { err = errnoErr(e1) } return } func WSAGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { var _p0 uint32 if wait { _p0 = 1 } r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { err = errnoErr(e1) } return } func WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags)) handle = syscall.Handle(r0) if handle == syscall.InvalidHandle { err = errnoErr(e1) } return }
go/src/internal/syscall/windows/zsyscall_windows.go/0
{ "file_path": "go/src/internal/syscall/windows/zsyscall_windows.go", "repo_id": "go", "token_count": 7764 }
298
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testlog import ( "sync" _ "unsafe" // for linkname ) // PanicOnExit0 reports whether to panic on a call to os.Exit(0). // This is in the testlog package because, like other definitions in // package testlog, it is a hook between the testing package and the // os package. This is used to ensure that an early call to os.Exit(0) // does not cause a test to pass. func PanicOnExit0() bool { panicOnExit0.mu.Lock() defer panicOnExit0.mu.Unlock() return panicOnExit0.val } // panicOnExit0 is the flag used for PanicOnExit0. This uses a lock // because the value can be cleared via a timer call that may race // with calls to os.Exit var panicOnExit0 struct { mu sync.Mutex val bool } // SetPanicOnExit0 sets panicOnExit0 to v. // // SetPanicOnExit0 should be an internal detail, // but alternate implementations of go test in other // build systems may need to access it using linkname. // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname SetPanicOnExit0 func SetPanicOnExit0(v bool) { panicOnExit0.mu.Lock() defer panicOnExit0.mu.Unlock() panicOnExit0.val = v }
go/src/internal/testlog/exit.go/0
{ "file_path": "go/src/internal/testlog/exit.go", "repo_id": "go", "token_count": 406 }
299