Dataset schema (one record per row below):
  patch: string, lengths 17 to 31.2k
  y: int64, values 1 to 1
  oldf: string, lengths 0 to 2.21M
  idx: int64, values 1 to 1
  id: int64, values 4.29k to 68.4k
  msg: string, lengths 8 to 843
  proj: string, 212 distinct values
  lang: string, 9 distinct values
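Each row that follows is one code-review record with these eight fields. As a rough sketch only (the JSON-lines layout, struct name, and field tags are assumptions for illustration, not something the dataset documents), a record could be modeled and decoded like this in Go:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ReviewRecord mirrors the columns listed above. The JSON field tags
// assume a hypothetical JSON-lines export, one row per object.
type ReviewRecord struct {
	Patch string `json:"patch"` // unified diff under review
	Y     int64  `json:"y"`     // label column (always 1 in this dump)
	OldF  string `json:"oldf"`  // full pre-change file contents
	Idx   int64  `json:"idx"`   // per-example index (always 1 in this dump)
	ID    int64  `json:"id"`    // numeric example id
	Msg   string `json:"msg"`   // the reviewer's comment
	Proj  string `json:"proj"`  // project identifier, e.g. "rclone-rclone"
	Lang  string `json:"lang"`  // language tag, e.g. "go"
}

func main() {
	// Values taken from the second row below; the patch text is truncated here.
	line := `{"patch":"@@ -0,0 +1,14 @@ ...","y":1,"oldf":"","idx":1,"id":5028,"msg":"remove this file!","proj":"shoaibrayeen-Programmers-Community","lang":"c"}`
	var rec ReviewRecord
	if err := json.Unmarshal([]byte(line), &rec); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s): %s\n", rec.Proj, rec.Lang, rec.Msg)
}
```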
@@ -15,8 +15,8 @@
 """Forseti CLI installer."""
 
 from forseti_installer import ForsetiInstaller
-from util import gcloud
+from util import constants
 
 
 class ForsetiClientInstaller(ForsetiInstaller):
 """Forseti command line interface installer"""
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Forseti CLI installer.""" from forseti_installer import ForsetiInstaller from util import gcloud class ForsetiClientInstaller(ForsetiInstaller): """Forseti command line interface installer""" def __init__(self, config, previous_installer=None): """Init Args: config (ClientConfig): The configuration object. previous_installer (ForsetiInstaller): The previous ran installer, we can get the installer environment information from it. """ super(ForsetiClientInstaller, self).__init__(config, previous_installer) (self.server_ip, self.server_zone, self.server_name) = gcloud.get_forseti_server_info() def deploy(self, deployment_tpl_path, conf_file_path, bucket_name): """Deploy Forseti using the deployment template. Grant access to service account. Args: deployment_tpl_path (str): Deployment template path conf_file_path (str): Configuration file path bucket_name (str): Name of the GCS bucket Returns: bool: Whether or not the deployment was successful str: Deployment name """ success, deployment_name = super(ForsetiClientInstaller, self).deploy( deployment_tpl_path, conf_file_path, bucket_name) if success: gcloud.grant_client_svc_acct_roles( self.project_id, self.gcp_service_acct_email, self.user_can_grant_roles) instance_name = 'forseti-{}-vm-{}'.format( self.config.installation_type, self.config.identifier) zone = '{}-c'.format(self.config.bucket_location) gcloud.enable_os_login(instance_name, zone) self.wait_until_vm_initialized(instance_name) return success, deployment_name def get_configuration_values(self): """Get configuration values Returns: dict: A dictionary of values needed to generate the forseti configuration file """ return { 'SERVER_IP': self.server_ip } def get_deployment_values(self): """Get deployment values Returns: dict: A dictionary of values needed to generate the forseti deployment template """ bucket_name = self.generate_bucket_name() return { 'FORSETI_BUCKET': bucket_name[len('gs://'):], 'BUCKET_LOCATION': self.config.bucket_location, 'GCP_CLIENT_SERVICE_ACCOUNT': self.gcp_service_acct_email, 'FORSETI_VERSION': self.version, 'FORSETI_SERVER_REGION': self.server_zone[:-2], 'FORSETI_SERVER_ZONE': self.server_zone, 'VPC_HOST_PROJECT_ID': self.config.vpc_host_project_id, 'VPC_HOST_NETWORK': self.config.vpc_host_network, 'VPC_HOST_SUBNETWORK': self.config.vpc_host_subnetwork }
1
31,895
alphasort this import
forseti-security-forseti-security
py
@@ -0,0 +1,14 @@
+#include<iostream>
+using namespace std;
+void reverseNum(int digit){
+ while(digit>0){
+ int n=digit%10;
+ digit=digit/10;
+ cout<<n<<"\t";
+ }
+}
+int main(){
+ int digit;
+ cin>>digit;
+ reverseNum(digit);
+}
1
1
5,028
remove this file!
shoaibrayeen-Programmers-Community
c
@@ -256,11 +256,14 @@ func (acc *Account) String() string {
 etas = "0s"
 }
 }
+ name := []rune(acc.name)
 if fs.Config.StatsFileNameLength > 0 {
 if len(name) > fs.Config.StatsFileNameLength {
- where := len(name) - fs.Config.StatsFileNameLength
- name = append([]rune{'.', '.', '.'}, name[where:]...)
+ suffixLength := fs.Config.StatsFileNameLength / 2
+ prefixLength := fs.Config.StatsFileNameLength - suffixLength
+ suffixStart := len(name) - suffixLength
+ name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
 }
 }
1
// Package accounting providers an accounting and limiting reader package accounting import ( "fmt" "io" "sync" "time" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/asyncreader" "github.com/ncw/rclone/fs/fserrors" "github.com/pkg/errors" ) // ErrorMaxTransferLimitReached is returned from Read when the max // transfer limit is reached. var ErrorMaxTransferLimitReached = fserrors.FatalError(errors.New("Max transfer limit reached as set by --max-transfer")) // Account limits and accounts for one transfer type Account struct { // The mutex is to make sure Read() and Close() aren't called // concurrently. Unfortunately the persistent connection loop // in http transport calls Read() after Do() returns on // CancelRequest so this race can happen when it apparently // shouldn't. mu sync.Mutex in io.Reader origIn io.ReadCloser close io.Closer size int64 name string statmu sync.Mutex // Separate mutex for stat values. bytes int64 // Total number of bytes read max int64 // if >=0 the max number of bytes to transfer start time.Time // Start time of first read lpTime time.Time // Time of last average measurement lpBytes int // Number of bytes read since last measurement avg float64 // Moving average of last few measurements in bytes/s closed bool // set if the file is closed exit chan struct{} // channel that will be closed when transfer is finished withBuf bool // is using a buffered in } const averagePeriod = 16 // period to do exponentially weighted averages over // NewAccountSizeName makes a Account reader for an io.ReadCloser of // the given size and name func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account { acc := &Account{ in: in, close: in, origIn: in, size: size, name: name, exit: make(chan struct{}), avg: 0, lpTime: time.Now(), max: int64(fs.Config.MaxTransfer), } go acc.averageLoop() Stats.inProgress.set(acc.name, acc) return acc } // NewAccount makes a Account reader for an object func NewAccount(in io.ReadCloser, obj fs.Object) *Account { return NewAccountSizeName(in, obj.Size(), obj.Remote()) } // WithBuffer - If the file is above a certain size it adds an Async reader func (acc *Account) WithBuffer() *Account { acc.withBuf = true var buffers int if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 { buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize) } else { buffers = int(acc.size / asyncreader.BufferSize) } // On big files add a buffer if buffers > 0 { rc, err := asyncreader.New(acc.origIn, buffers) if err != nil { fs.Errorf(acc.name, "Failed to make buffer: %v", err) } else { acc.in = rc acc.close = rc } } return acc } // GetReader returns the underlying io.ReadCloser under any Buffer func (acc *Account) GetReader() io.ReadCloser { acc.mu.Lock() defer acc.mu.Unlock() return acc.origIn } // GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader { acc.mu.Lock() defer acc.mu.Unlock() if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok { return asyncIn } return nil } // StopBuffering stops the async buffer doing any more buffering func (acc *Account) StopBuffering() { if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok { asyncIn.Abandon() } } // UpdateReader updates the underlying io.ReadCloser stopping the // asynb buffer (if any) and re-adding it func (acc *Account) UpdateReader(in io.ReadCloser) { acc.mu.Lock() acc.StopBuffering() acc.in = in acc.close = in acc.origIn = in acc.WithBuffer() acc.mu.Unlock() } // averageLoop calculates 
averages for the stats in the background func (acc *Account) averageLoop() { tick := time.NewTicker(time.Second) var period float64 defer tick.Stop() for { select { case now := <-tick.C: acc.statmu.Lock() // Add average of last second. elapsed := now.Sub(acc.lpTime).Seconds() avg := float64(acc.lpBytes) / elapsed // Soft start the moving average if period < averagePeriod { period++ } acc.avg = (avg + (period-1)*acc.avg) / period acc.lpBytes = 0 acc.lpTime = now // Unlock stats acc.statmu.Unlock() case <-acc.exit: return } } } // read bytes from the io.Reader passed in and account them func (acc *Account) read(in io.Reader, p []byte) (n int, err error) { acc.statmu.Lock() if acc.max >= 0 && Stats.GetBytes() >= acc.max { acc.statmu.Unlock() return 0, ErrorMaxTransferLimitReached } // Set start time. if acc.start.IsZero() { acc.start = time.Now() } acc.statmu.Unlock() n, err = in.Read(p) // Update Stats acc.statmu.Lock() acc.lpBytes += n acc.bytes += int64(n) acc.statmu.Unlock() Stats.Bytes(int64(n)) limitBandwidth(n) return } // Read bytes from the object - see io.Reader func (acc *Account) Read(p []byte) (n int, err error) { acc.mu.Lock() defer acc.mu.Unlock() return acc.read(acc.in, p) } // Close the object func (acc *Account) Close() error { acc.mu.Lock() defer acc.mu.Unlock() if acc.closed { return nil } acc.closed = true close(acc.exit) Stats.inProgress.clear(acc.name) return acc.close.Close() } // progress returns bytes read as well as the size. // Size can be <= 0 if the size is unknown. func (acc *Account) progress() (bytes, size int64) { if acc == nil { return 0, 0 } acc.statmu.Lock() bytes, size = acc.bytes, acc.size acc.statmu.Unlock() return bytes, size } // speed returns the speed of the current file transfer // in bytes per second, as well a an exponentially weighted moving average // If no read has completed yet, 0 is returned for both values. func (acc *Account) speed() (bps, current float64) { if acc == nil { return 0, 0 } acc.statmu.Lock() defer acc.statmu.Unlock() if acc.bytes == 0 { return 0, 0 } // Calculate speed from first read. total := float64(time.Now().Sub(acc.start)) / float64(time.Second) bps = float64(acc.bytes) / total current = acc.avg return } // eta returns the ETA of the current operation, // rounded to full seconds. // If the ETA cannot be determined 'ok' returns false. func (acc *Account) eta() (etaDuration time.Duration, ok bool) { if acc == nil { return 0, false } acc.statmu.Lock() defer acc.statmu.Unlock() return eta(acc.bytes, acc.size, acc.avg) } // String produces stats for this file func (acc *Account) String() string { a, b := acc.progress() _, cur := acc.speed() eta, etaok := acc.eta() etas := "-" if etaok { if eta > 0 { etas = fmt.Sprintf("%v", eta) } else { etas = "0s" } } name := []rune(acc.name) if fs.Config.StatsFileNameLength > 0 { if len(name) > fs.Config.StatsFileNameLength { where := len(name) - fs.Config.StatsFileNameLength name = append([]rune{'.', '.', '.'}, name[where:]...) 
} } if fs.Config.DataRateUnit == "bits" { cur = cur * 8 } percentageDone := 0 if b > 0 { percentageDone = int(100 * float64(a) / float64(b)) } done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b)) return fmt.Sprintf("%45s: %s, %s/s, %s", string(name), done, fs.SizeSuffix(cur), etas, ) } // RemoteStats produces stats for this file func (acc *Account) RemoteStats() (out map[string]interface{}) { out = make(map[string]interface{}) a, b := acc.progress() out["bytes"] = a out["size"] = b spd, cur := acc.speed() out["speed"] = spd out["speedAvg"] = cur eta, etaok := acc.eta() out["eta"] = nil if etaok { if eta > 0 { out["eta"] = eta.Seconds() } else { out["eta"] = 0 } } out["name"] = acc.name percentageDone := 0 if b > 0 { percentageDone = int(100 * float64(a) / float64(b)) } out["percentage"] = percentageDone return out } // OldStream returns the top io.Reader func (acc *Account) OldStream() io.Reader { acc.mu.Lock() defer acc.mu.Unlock() return acc.in } // SetStream updates the top io.Reader func (acc *Account) SetStream(in io.Reader) { acc.mu.Lock() acc.in = in acc.mu.Unlock() } // WrapStream wraps an io Reader so it will be accounted in the same // way as account func (acc *Account) WrapStream(in io.Reader) io.Reader { return &accountStream{ acc: acc, in: in, } } // accountStream accounts a single io.Reader into a parent *Account type accountStream struct { acc *Account in io.Reader } // OldStream return the underlying stream func (a *accountStream) OldStream() io.Reader { return a.in } // SetStream set the underlying stream func (a *accountStream) SetStream(in io.Reader) { a.in = in } // WrapStream wrap in in an accounter func (a *accountStream) WrapStream(in io.Reader) io.Reader { return a.acc.WrapStream(in) } // Read bytes from the object - see io.Reader func (a *accountStream) Read(p []byte) (n int, err error) { return a.acc.read(a.in, p) } // Accounter accounts a stream allowing the accounting to be removed and re-added type Accounter interface { io.Reader OldStream() io.Reader SetStream(io.Reader) WrapStream(io.Reader) io.Reader } // WrapFn wraps an io.Reader (for accounting purposes usually) type WrapFn func(io.Reader) io.Reader // UnWrap unwraps a reader returning unwrapped and wrap, a function to // wrap it back up again. If `in` is an Accounter then this function // will take the accounting unwrapped and wrap will put it back on // again the new Reader passed in. // // This allows functions which wrap io.Readers to move the accounting // to the end of the wrapped chain of readers. This is very important // if buffering is being introduced and if the Reader might be wrapped // again. func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) { acc, ok := in.(Accounter) if !ok { return in, func(r io.Reader) io.Reader { return r } } return acc.OldStream(), acc.WrapStream }
1
7,665
You don't need to say `rune('…')` - `'…'` is already a `rune`.
rclone-rclone
go
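The rclone patch in this row replaces front-trimming (prepending '...' to the tail of the name) with keeping both ends of the name around a single '…'. Below is a minimal standalone sketch of that middle-truncation idea; the function name is made up, and the limit arithmetic differs slightly from the patch (the patch lets the ellipsis add one rune on top of fs.Config.StatsFileNameLength, while this sketch keeps the result within maxLen):

```go
package main

import "fmt"

// truncateMiddle keeps the beginning and the end of s, joined by '…',
// so the result is at most maxLen runes. Working on a []rune (as the
// patch does) avoids cutting a multi-byte character in half.
func truncateMiddle(s string, maxLen int) string {
	name := []rune(s)
	if maxLen <= 0 || len(name) <= maxLen {
		return s
	}
	suffixLen := maxLen / 2
	prefixLen := maxLen - suffixLen - 1 // reserve one rune for the ellipsis
	out := make([]rune, 0, maxLen)
	out = append(out, name[:prefixLen]...)
	out = append(out, '…')
	out = append(out, name[len(name)-suffixLen:]...)
	return string(out)
}

func main() {
	fmt.Println(truncateMiddle("a-very-long-file-name-from-some-remote.tar.gz", 20))
}
```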
@@ -38,7 +38,7 @@ module Selenium
 port
 end
 
- IGNORED_ERRORS = [Errno::EADDRNOTAVAIL]
+ IGNORED_ERRORS = [Errno::EADDRNOTAVAIL].freeze
 IGNORED_ERRORS << Errno::EBADF if Platform.cygwin?
 IGNORED_ERRORS.freeze
 
1
# encoding: utf-8 # # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. module Selenium module WebDriver class PortProber def self.above(port) port += 1 until free? port port end def self.random # TODO: Avoid this # # (a) should pick a port that's guaranteed to be free on all interfaces # (b) should pick a random port outside the ephemeral port range # server = TCPServer.new(Platform.localhost, 0) port = server.addr[1] server.close port end IGNORED_ERRORS = [Errno::EADDRNOTAVAIL] IGNORED_ERRORS << Errno::EBADF if Platform.cygwin? IGNORED_ERRORS.freeze def self.free?(port) Platform.interfaces.each do |host| begin TCPServer.new(host, port).close rescue *IGNORED_ERRORS => ex WebDriver.logger.debug("port prober could not bind to #{host}:#{port} (#{ex.message})") # ignored - some machines appear unable to bind to some of their interfaces end end true rescue SocketError, Errno::EADDRINUSE false end end # PortProber end # WebDriver end # Selenium
1
14,876
We can't freeze this and then add something to it in the next line. The `freeze` on line 43 is sufficient. If Rubocop flags this we need to exclude it.
SeleniumHQ-selenium
java
@@ -0,0 +1,5 @@
+package javaslang.control;
+
+public interface Kind<TYPE extends Kind<TYPE, ?, ?>, E, T> {
+
+}
1
1
6,540
We call it Kind2 and move it out of the `javaslang/control` package into the `javaslang` package. Maybe I will later generate Kind1..Kindn, but that's another story.
vavr-io-vavr
java
@@ -166,7 +166,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
 if db.capacity == 0 {
 db.capacity = defaultCapacity
 }
- db.logger.Infof("db capacity: %v", db.capacity)
+ db.logger.Infof("database capacity in chunks (and in Bytes): %v (%v)", db.capacity, db.capacity*swarm.ChunkSize)
 if maxParallelUpdateGC > 0 {
 db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
 }
1
// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package localstore import ( "encoding/binary" "errors" "os" "runtime/pprof" "sync" "time" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/shed" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tags" "github.com/prometheus/client_golang/prometheus" "github.com/syndtr/goleveldb/leveldb" ) var _ storage.Storer = &DB{} var ( // ErrInvalidMode is retuned when an unknown Mode // is provided to the function. ErrInvalidMode = errors.New("invalid mode") ) var ( // Default value for Capacity DB option. defaultCapacity uint64 = 5000000 // Limit the number of goroutines created by Getters // that call updateGC function. Value 0 sets no limit. maxParallelUpdateGC = 1000 ) // DB is the local store implementation and holds // database related objects. type DB struct { shed *shed.DB tags *tags.Tags // schema name of loaded data schemaName shed.StringField // retrieval indexes retrievalDataIndex shed.Index retrievalAccessIndex shed.Index // push syncing index pushIndex shed.Index // push syncing subscriptions triggers pushTriggers []chan struct{} pushTriggersMu sync.RWMutex // pull syncing index pullIndex shed.Index // pull syncing subscriptions triggers per bin pullTriggers map[uint8][]chan struct{} pullTriggersMu sync.RWMutex // binIDs stores the latest chunk serial ID for every // proximity order bin binIDs shed.Uint64Vector // garbage collection index gcIndex shed.Index // garbage collection exclude index for pinned contents gcExcludeIndex shed.Index // pin files Index pinIndex shed.Index // field that stores number of intems in gc index gcSize shed.Uint64Field // garbage collection is triggered when gcSize exceeds // the capacity value capacity uint64 // triggers garbage collection event loop collectGarbageTrigger chan struct{} // a buffered channel acting as a semaphore // to limit the maximal number of goroutines // created by Getters to call updateGC function updateGCSem chan struct{} // a wait group to ensure all updateGC goroutines // are done before closing the database updateGCWG sync.WaitGroup // baseKey is the overlay address baseKey []byte batchMu sync.Mutex // this channel is closed when close function is called // to terminate other goroutines close chan struct{} // protect Close method from exiting before // garbage collection and gc size write workers // are done collectGarbageWorkerDone chan struct{} // wait for all subscriptions to finish before closing // underlaying BadgerDB to prevent possible panics from // iterators subscritionsWG sync.WaitGroup metrics metrics logger logging.Logger } // Options struct holds optional parameters for configuring DB. 
type Options struct { // Capacity is a limit that triggers garbage collection when // number of items in gcIndex equals or exceeds it. Capacity uint64 // MetricsPrefix defines a prefix for metrics names. MetricsPrefix string Tags *tags.Tags } // New returns a new DB. All fields and indexes are initialized // and possible conflicts with schema from existing database is checked. // One goroutine for writing batches is created. func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB, err error) { if o == nil { // default options o = &Options{ Capacity: defaultCapacity, } } db = &DB{ capacity: o.Capacity, baseKey: baseKey, tags: o.Tags, // channel collectGarbageTrigger // needs to be buffered with the size of 1 // to signal another event if it // is triggered during already running function collectGarbageTrigger: make(chan struct{}, 1), close: make(chan struct{}), collectGarbageWorkerDone: make(chan struct{}), metrics: newMetrics(), logger: logger, } if db.capacity == 0 { db.capacity = defaultCapacity } db.logger.Infof("db capacity: %v", db.capacity) if maxParallelUpdateGC > 0 { db.updateGCSem = make(chan struct{}, maxParallelUpdateGC) } db.shed, err = shed.NewDB(path) if err != nil { return nil, err } // Identify current storage schema by arbitrary name. db.schemaName, err = db.shed.NewStringField("schema-name") if err != nil { return nil, err } schemaName, err := db.schemaName.Get() if err != nil && !errors.Is(err, leveldb.ErrNotFound) { return nil, err } if schemaName == "" { // initial new localstore run err := db.schemaName.Put(DbSchemaCurrent) if err != nil { return nil, err } } else { // execute possible migrations err = db.migrate(schemaName) if err != nil { return nil, err } } // Persist gc size. db.gcSize, err = db.shed.NewUint64Field("gc-size") if err != nil { return nil, err } // Index storing actual chunk address, data and bin id. db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { return fields.Address, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.Address = key return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { b := make([]byte, 16) binary.BigEndian.PutUint64(b[:8], fields.BinID) binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp)) value = append(b, fields.Data...) return value, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16])) e.BinID = binary.BigEndian.Uint64(value[:8]) e.Data = value[16:] return e, nil }, }) if err != nil { return nil, err } // Index storing access timestamp for a particular address. // It is needed in order to update gc index keys for iteration order. 
db.retrievalAccessIndex, err = db.shed.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { return fields.Address, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.Address = key return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { b := make([]byte, 8) binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp)) return b, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { e.AccessTimestamp = int64(binary.BigEndian.Uint64(value)) return e, nil }, }) if err != nil { return nil, err } // pull index allows history and live syncing per po bin db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash|Tag", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { key = make([]byte, 41) key[0] = db.po(swarm.NewAddress(fields.Address)) binary.BigEndian.PutUint64(key[1:9], fields.BinID) return key, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.BinID = binary.BigEndian.Uint64(key[1:9]) return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { value = make([]byte, 36) // 32 bytes address, 4 bytes tag copy(value, fields.Address) if fields.Tag != 0 { binary.BigEndian.PutUint32(value[32:], fields.Tag) } return value, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { e.Address = value[:32] if len(value) > 32 { e.Tag = binary.BigEndian.Uint32(value[32:]) } return e, nil }, }) if err != nil { return nil, err } // create a vector for bin IDs db.binIDs, err = db.shed.NewUint64Vector("bin-ids") if err != nil { return nil, err } // create a pull syncing triggers used by SubscribePull function db.pullTriggers = make(map[uint8][]chan struct{}) // push index contains as yet unsynced chunks db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { key = make([]byte, 40) binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp)) copy(key[8:], fields.Address) return key, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.Address = key[8:] e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[:8])) return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { tag := make([]byte, 4) binary.BigEndian.PutUint32(tag, fields.Tag) return tag, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { if len(value) == 4 { // only values with tag should be decoded e.Tag = binary.BigEndian.Uint32(value) } return e, nil }, }) if err != nil { return nil, err } // create a push syncing triggers used by SubscribePush function db.pushTriggers = make([]chan struct{}, 0) // gc index for removable chunk ordered by ascending last access time db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { b := make([]byte, 16, 16+len(fields.Address)) binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp)) binary.BigEndian.PutUint64(b[8:16], fields.BinID) key = append(b, fields.Address...) 
return key, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8])) e.BinID = binary.BigEndian.Uint64(key[8:16]) e.Address = key[16:] return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { return nil, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { return e, nil }, }) if err != nil { return nil, err } // Create a index structure for storing pinned chunks and their pin counts db.pinIndex, err = db.shed.NewIndex("Hash->PinCounter", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { return fields.Address, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.Address = key return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { b := make([]byte, 8) binary.BigEndian.PutUint64(b[:8], fields.PinCounter) return b, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { e.PinCounter = binary.BigEndian.Uint64(value[:8]) return e, nil }, }) if err != nil { return nil, err } // Create a index structure for excluding pinned chunks from gcIndex db.gcExcludeIndex, err = db.shed.NewIndex("Hash->nil", shed.IndexFuncs{ EncodeKey: func(fields shed.Item) (key []byte, err error) { return fields.Address, nil }, DecodeKey: func(key []byte) (e shed.Item, err error) { e.Address = key return e, nil }, EncodeValue: func(fields shed.Item) (value []byte, err error) { return nil, nil }, DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) { return e, nil }, }) if err != nil { return nil, err } // start garbage collection worker go db.collectGarbageWorker() return db, nil } // Close closes the underlying database. func (db *DB) Close() (err error) { close(db.close) // wait for all handlers to finish done := make(chan struct{}) go func() { db.updateGCWG.Wait() db.subscritionsWG.Wait() // wait for gc worker to // return before closing the shed <-db.collectGarbageWorkerDone close(done) }() select { case <-done: case <-time.After(5 * time.Second): db.logger.Errorf("localstore closed with still active goroutines") // Print a full goroutine dump to debug blocking. // TODO: use a logger to write a goroutine profile prof := pprof.Lookup("goroutine") err = prof.WriteTo(os.Stdout, 2) if err != nil { return err } } return db.shed.Close() } // po computes the proximity order between the address // and database base key. func (db *DB) po(addr swarm.Address) (bin uint8) { return swarm.Proximity(db.baseKey, addr.Bytes()) } // DebugIndices returns the index sizes for all indexes in localstore // the returned map keys are the index name, values are the number of elements in the index func (db *DB) DebugIndices() (indexInfo map[string]int, err error) { indexInfo = make(map[string]int) for k, v := range map[string]shed.Index{ "retrievalDataIndex": db.retrievalDataIndex, "retrievalAccessIndex": db.retrievalAccessIndex, "pushIndex": db.pushIndex, "pullIndex": db.pullIndex, "gcIndex": db.gcIndex, "gcExcludeIndex": db.gcExcludeIndex, "pinIndex": db.pinIndex, } { indexSize, err := v.Count() if err != nil { return indexInfo, err } indexInfo[k] = indexSize } val, err := db.gcSize.Get() if err != nil { return indexInfo, err } indexInfo["gcSize"] = int(val) return indexInfo, err } // chunkToItem creates new Item with data provided by the Chunk. 
func chunkToItem(ch swarm.Chunk) shed.Item { return shed.Item{ Address: ch.Address().Bytes(), Data: ch.Data(), Tag: ch.TagID(), } } // addressToItem creates new Item with a provided address. func addressToItem(addr swarm.Address) shed.Item { return shed.Item{ Address: addr.Bytes(), } } // addressesToItems constructs a slice of Items with only // addresses set on them. func addressesToItems(addrs ...swarm.Address) []shed.Item { items := make([]shed.Item, len(addrs)) for i, addr := range addrs { items[i] = shed.Item{ Address: addr.Bytes(), } } return items } // now is a helper function that returns a current unix timestamp // in UTC timezone. // It is set in the init function for usage in production, and // optionally overridden in tests for data validation. var now func() int64 func init() { // set the now function now = func() (t int64) { return time.Now().UTC().UnixNano() } } // totalTimeMetric logs a message about time between provided start time // and the time when the function is called and sends a resetting timer metric // with provided name appended with ".total-time". func totalTimeMetric(metric prometheus.Counter, start time.Time) { totalTime := time.Since(start) metric.Add(float64(totalTime)) }
1
10,551
I think it might be nicer to have a message like: `database capacity: %d chunks (%d bytes, %d megabytes)`. Counting in bytes is so 90s :)
ethersphere-bee
go
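The comment in this row asks for the capacity to be reported in chunks together with the derived byte and megabyte figures. A hedged sketch of such a log line follows; the chunkSize constant stands in for swarm.ChunkSize (4096 bytes), and the standard log package stands in for bee's logging.Logger:

```go
package main

import "log"

// chunkSize stands in for swarm.ChunkSize; Swarm chunks are 4096 bytes.
const chunkSize = 4096

func main() {
	var capacity uint64 = 5000000 // defaultCapacity in the file above
	bytes := capacity * chunkSize
	log.Printf("database capacity: %d chunks (%d bytes, %d megabytes)",
		capacity, bytes, bytes/(1024*1024))
}
```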
@@ -195,6 +195,16 @@ public class Constants {
 // dir to keep dependency plugins
 public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
 
+
+ /*
+ * Prefix used to construct Hadoop/Spark user job link.
+ * a) RM: resource manager
+ * b) JHS: Hadoop job history server
+ * c) SHS: spark job history server
+ * */
+ public static final String AZKABAN_RM_JOB_LINK = "azkaban.rm.job.link";
+ public static final String AZKABAN_JHS_JOB_LINK = "azkaban.jhs.job.link";
+ public static final String AZKABAN_SHS_JOB_LINK = "azkaban.shs.job.link";
 }
 
 public static class FlowProperties {
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban; import java.time.Duration; /** * Constants used in configuration files or shared among classes. * * <p>Conventions: * * <p>Internal constants to be put in the {@link Constants} class * * <p>Configuration keys to be put in the {@link ConfigurationKeys} class * * <p>Flow level properties keys to be put in the {@link FlowProperties} class * * <p>Job level Properties keys to be put in the {@link JobProperties} class */ public class Constants { // Azkaban Flow Versions public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0; public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0; // Flow 2.0 file suffix public static final String PROJECT_FILE_SUFFIX = ".project"; public static final String FLOW_FILE_SUFFIX = ".flow"; // Flow 2.0 node type public static final String NODE_TYPE = "type"; public static final String FLOW_NODE_TYPE = "flow"; // Flow 2.0 flow and job path delimiter public static final String PATH_DELIMITER = ":"; // Flow trigger props public static final String SCHEDULE_TYPE = "type"; public static final String CRON_SCHEDULE_TYPE = "cron"; public static final String SCHEDULE_VALUE = "value"; // Job properties override suffix public static final String JOB_OVERRIDE_SUFFIX = ".jor"; // Names and paths of various file names to configure Azkaban public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port"; public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile"; public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app"; // Internal username used to perform SLA action public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla"; // Memory check retry interval when OOM in ms public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1; // Max number of memory check retry public static final int MEMORY_CHECK_RETRY_LIMIT = 720; public static final int DEFAULT_PORT_NUMBER = 8081; public static final int DEFAULT_SSL_PORT_NUMBER = 8443; public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20; // One Schedule's default End Time: 01/01/2050, 00:00:00, UTC public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L; // Default flow trigger max wait time public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10); public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1); // The flow exec id for a flow trigger instance which hasn't started a flow yet public static final int UNASSIGNED_EXEC_ID = -1; public static class ConfigurationKeys { // Configures Azkaban Flow Version in project YAML file public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version"; // These properties are configurable through azkaban.properties public static final String 
AZKABAN_PID_FILENAME = "azkaban.pid.filename"; // Defines a list of external links, each referred to as a topic public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics"; // External URL template of a given topic, specified in the list defined above public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url"; // Designates one of the external link topics to correspond to an execution analyzer public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label"; // Designates one of the external link topics to correspond to a job log viewer public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label"; // Configures the Kafka appender for logging user jobs, specified for the exec server public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList"; public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic"; // Represent the class name of azkaban metrics reporter. public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name"; // Represent the metrics server URL. public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url"; public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled"; // User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users. // enduser -> myazkabanhost:443 -> proxy -> localhost:8081 // when this parameters set then these parameters are used to generate email links. // if these parameters are not set then jetty.hostname, and jetty.port(if ssl configured jetty.ssl.port) are used. public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname"; public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port"; public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port"; // Hostname for the host, if not specified, canonical hostname will be used public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname"; // List of users we prevent azkaban from running flows as. (ie: root, azkaban) public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users"; // Path name of execute-as-user executable public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib"; // Name of *nix group associated with the process running Azkaban public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name"; // Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs. // The property is used for the web server to get the host name of the executor when running in SOLO mode. public static final String EXECUTOR_HOST = "executor.host"; // Max flow running time in mins, server will kill flows running longer than this setting. // if not set or <= 0, then there's no restriction on running time. 
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes"; public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type"; public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir"; public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path"; public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri"; public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal"; public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path"; public static final String PROJECT_TEMP_DIR = "project.temp.dir"; // Event reporting properties public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM = "azkaban.event.reporting.class"; public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS = "azkaban.event.reporting.kafka.brokers"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC = "azkaban.event.reporting.kafka.topic"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL = "azkaban.event.reporting.kafka.schema.registry.url"; /* * The max number of artifacts retained per project. * Accepted Values: * - 0 : Save all artifacts. No clean up is done on storage. * - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage * * Note: Having an unacceptable value results in an exception and the service would REFUSE * to start. * * Example: * a) azkaban.storage.artifact.max.retention=all * implies save all artifacts * b) azkaban.storage.artifact.max.retention=3 * implies save latest 3 versions saved in storage. **/ public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention"; // enable quartz scheduler and flow trigger if true. public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz"; public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential"; // dir to keep dependency plugins public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir"; } public static class FlowProperties { // Basic properties of flows as set by the executor server public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname"; public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid"; public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser"; public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid"; public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion"; } public static class JobProperties { // Job property that enables/disables using Kafka logging of user job logs public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable"; /* * this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available. * EXTRA_HCAT_CLUSTERS has the following format: * other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port" * Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster. * The uris(hcat servers) in a "cluster" ensures HA is provided. 
**/ public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters"; /* * the settings to be defined by user indicating if there are hcat locations other than the * default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are * supported, use comma to separate the values, values are case insensitive. **/ // Use EXTRA_HCAT_CLUSTERS instead @Deprecated public static final String EXTRA_HCAT_LOCATION = "other_hcat_location"; // If true, AZ will fetches the jobs' certificate from remote Certificate Authority. public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl"; // Job properties that indicate maximum memory size public static final String JOB_MAX_XMS = "job.max.Xms"; public static final String MAX_XMS_DEFAULT = "1G"; public static final String JOB_MAX_XMX = "job.max.Xmx"; public static final String MAX_XMX_DEFAULT = "2G"; // The hadoop user the job should run under. If not specified, it will default to submit user. public static final String USER_TO_PROXY = "user.to.proxy"; } public static class JobCallbackProperties { public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout"; public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout"; public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout"; public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout"; public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size"; } }
1
15,940
The '.' in the key names is used to separate namespaces, NOT to separate words. How about azkaban.external_resources.resource_manager? ---- Why is it better than using the full name in the variable name, e.g. RESOURCE_MANAGER_LINK?
azkaban-azkaban
java
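The exchange in this row is about config-key style: dots delimit namespaces and underscores join words within a segment, as opposed to spelling the resource out only in the Java constant name. A tiny side-by-side of the two key spellings (Go is used here just for consistency with the other sketches; Azkaban itself is Java, and the second string is the reviewer's proposal, not a shipped constant):

```go
package main

import "fmt"

const (
	// Key as added in the patch: every word separated by a dot.
	rmJobLinkPatch = "azkaban.rm.job.link"
	// Reviewer's proposal: dots only between namespaces, underscores
	// between the words inside one segment.
	rmJobLinkProposed = "azkaban.external_resources.resource_manager"
)

func main() {
	fmt.Println(rmJobLinkPatch, "->", rmJobLinkProposed)
}
```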
@@ -17,7 +17,7 @@
 using BenchmarkDotNet.Attributes;
 using Benchmarks.Tracing;
 using OpenTelemetry.Trace;
-using OpenTelemetry.Trace.Configuration;
+using OpenTelemetry.Trace;
 using OpenTelemetry.Trace.Samplers;
 
 namespace Benchmarks
1
// <copyright file="OpenTelemetrySdkBenchmarks.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using BenchmarkDotNet.Attributes; using Benchmarks.Tracing; using OpenTelemetry.Trace; using OpenTelemetry.Trace.Configuration; using OpenTelemetry.Trace.Samplers; namespace Benchmarks { [MemoryDiagnoser] public class OpenTelemetrySdkBenchmarks { private readonly Tracer alwaysSampleTracer; private readonly Tracer neverSampleTracer; private readonly Tracer noopTracer; public OpenTelemetrySdkBenchmarks() { using var openTelemetryAlwaysOnSample = OpenTelemetrySdk.EnableOpenTelemetry( (builder) => builder.AddActivitySource("AlwaysOnSample").SetSampler(new AlwaysOnSampler())); using var openTelemetryAlwaysOffSample = OpenTelemetrySdk.EnableOpenTelemetry( (builder) => builder.AddActivitySource("AlwaysOffSample").SetSampler(new AlwaysOffSampler())); using var openTelemetryNoOp = OpenTelemetrySdk.EnableOpenTelemetry(null); this.alwaysSampleTracer = TracerProvider.GetTracer("AlwaysOnSample"); this.neverSampleTracer = TracerProvider.GetTracer("AlwaysOffSample"); this.noopTracer = TracerProvider.GetTracer("NoOp"); } [Benchmark] public TelemetrySpan CreateSpan_Sampled() => SpanCreationScenarios.CreateSpan(this.alwaysSampleTracer); [Benchmark] public TelemetrySpan CreateSpan_ParentContext() => SpanCreationScenarios.CreateSpan_ParentContext(this.alwaysSampleTracer); [Benchmark] public TelemetrySpan CreateSpan_Attributes_Sampled() => SpanCreationScenarios.CreateSpan_Attributes(this.alwaysSampleTracer); [Benchmark] public TelemetrySpan CreateSpan_WithSpan() => SpanCreationScenarios.CreateSpan_Propagate(this.alwaysSampleTracer); [Benchmark] public TelemetrySpan CreateSpan_Active() => SpanCreationScenarios.CreateSpan_Active(this.alwaysSampleTracer); [Benchmark] public TelemetrySpan CreateSpan_Active_GetCurrent() => SpanCreationScenarios.CreateSpan_Active_GetCurrent(this.alwaysSampleTracer); [Benchmark] public void CreateSpan_Attributes_NotSampled() => SpanCreationScenarios.CreateSpan_Attributes(this.neverSampleTracer); [Benchmark(Baseline = true)] public TelemetrySpan CreateSpan_Noop() => SpanCreationScenarios.CreateSpan(this.noopTracer); [Benchmark] public TelemetrySpan CreateSpan_Attributes_Noop() => SpanCreationScenarios.CreateSpan_Attributes(this.noopTracer); [Benchmark] public TelemetrySpan CreateSpan_Propagate_Noop() => SpanCreationScenarios.CreateSpan_Propagate(this.noopTracer); } }
1
15,155
This seems to be a duplicate of line 19?
open-telemetry-opentelemetry-dotnet
.cs
@@ -20,7 +20,7 @@
 #endif
 
 #if !(defined(_WIN32) || (defined(__WXMAC__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4)))
-#include <xlocale.h>
+#include <wx/xlocale.h>
 #endif
 
 #include "stdwx.h"
1
// This file is part of BOINC. // http://boinc.berkeley.edu // Copyright (C) 2013 University of California // // BOINC is free software; you can redistribute it and/or modify it // under the terms of the GNU Lesser General Public License // as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // BOINC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. // See the GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with BOINC. If not, see <http://www.gnu.org/licenses/>. #if defined(__GNUG__) && !defined(__APPLE__) #pragma implementation "AsyncRPC.h" #endif #if !(defined(_WIN32) || (defined(__WXMAC__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4))) #include <xlocale.h> #endif #include "stdwx.h" #include "BOINCGUIApp.h" #include "MainDocument.h" #include "AsyncRPC.h" #include "BOINCBaseFrame.h" #include "BOINCTaskBar.h" #include "error_numbers.h" #include "SkinManager.h" #include "DlgEventLog.h" #include "util.h" extern bool s_bSkipExitConfirmation; // Delay in milliseconds before showing AsyncRPCDlg #define RPC_WAIT_DLG_DELAY 1500 // How often to check for events when minimized and waiting for Demand RPC #define DELAY_WHEN_MINIMIZED 500 // Delay in milliseconds to allow thread to exit before killing it #define RPC_KILL_DELAY 2000 ASYNC_RPC_REQUEST::ASYNC_RPC_REQUEST() { clear(); } ASYNC_RPC_REQUEST::~ASYNC_RPC_REQUEST() { clear(); } void ASYNC_RPC_REQUEST::clear() { rpcType = (ASYNC_RPC_TYPE) 0; which_rpc = (RPC_SELECTOR) 0; exchangeBuf = NULL; arg1 = NULL; arg2 = NULL; arg3 = NULL; arg4 = NULL; completionTime = NULL; RPCExecutionTime = NULL; resultPtr = NULL; retval = 0; isActive = false; } bool ASYNC_RPC_REQUEST::isSameAs(ASYNC_RPC_REQUEST& otherRequest) { if (which_rpc != otherRequest.which_rpc) return false; if (arg1 != otherRequest.arg1) return false; if (exchangeBuf != otherRequest.exchangeBuf) return false; if (arg2 != otherRequest.arg2) return false; if (arg3 != otherRequest.arg3) return false; if (arg4 != otherRequest.arg4) return false; if (rpcType != otherRequest.rpcType) return false; if (completionTime != otherRequest.completionTime) return false; if (resultPtr != otherRequest.resultPtr) return false; // OK if isActive and retval don't match. 
return true; } AsyncRPC::AsyncRPC(CMainDocument *pDoc) { m_pDoc = pDoc; } AsyncRPC::~AsyncRPC() {} int AsyncRPC::RPC_Wait(RPC_SELECTOR which_rpc, void *arg1, void *arg2, void *arg3, void *arg4, bool hasPriority ) { ASYNC_RPC_REQUEST request; int retval = 0; request.which_rpc = which_rpc; request.arg1 = arg1; request.arg2 = arg2; request.arg3 = arg3; request.arg4 = arg4; if (which_rpc == RPC_QUIT) { request.rpcType = RPC_TYPE_ASYNC_NO_REFRESH; } else { request.rpcType = RPC_TYPE_WAIT_FOR_COMPLETION; } request.RPCExecutionTime = NULL; retval = m_pDoc->RequestRPC(request, hasPriority); return retval; } RPCThread::RPCThread(CMainDocument *pDoc, BOINC_Mutex* pRPC_Thread_Mutex, BOINC_Condition* pRPC_Thread_Condition, BOINC_Mutex* pRPC_Request_Mutex, BOINC_Condition* pRPC_Request_Condition) : wxThread() { m_pDoc = pDoc; m_pRPC_Thread_Mutex = pRPC_Thread_Mutex; m_pRPC_Thread_Condition = pRPC_Thread_Condition; m_pRPC_Request_Mutex = pRPC_Request_Mutex; m_pRPC_Request_Condition = pRPC_Request_Condition; } void *RPCThread::Entry() { int retval = 0; CRPCFinishedEvent RPC_done_event( wxEVT_RPC_FINISHED ); ASYNC_RPC_REQUEST *current_request; double startTime = 0; wxMutexError mutexErr = wxMUTEX_NO_ERROR; wxCondError condErr = wxCOND_NO_ERROR; #ifndef NO_PER_THREAD_LOCALE #ifdef __WXMSW__ // On Windows, set all locales for this thread on a per-thread basis _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); setlocale(LC_ALL, "C"); #else // We initialize RPC_Thread_Locale to fix a compiler warning locale_t RPC_Thread_Locale = LC_GLOBAL_LOCALE; #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4) if (uselocale) // uselocale() is not available in Mac OS 10.3.9 #endif { // On Mac / Unix / Linux, set "C" locale for this thread only RPC_Thread_Locale = newlocale(LC_ALL_MASK, "C", NULL); uselocale(RPC_Thread_Locale); } #endif // ifndef __WXMSW__ #endif // ifndef NO_PER_THREAD_LOCALE m_pRPC_Thread_Mutex->Lock(); m_pDoc->m_bRPCThreadIsReady = true; while(true) { // Wait for main thread to wake us // This does the following: // (1) Unlocks the Mutex and puts the RPC thread to sleep as an atomic operation. // (2) On Signal from main thread: locks Mutex again and wakes the RPC thread. 
condErr = m_pRPC_Thread_Condition->Wait(); wxASSERT(condErr == wxCOND_NO_ERROR); if (m_pDoc->m_bShutDownRPCThread) { #if !defined(NO_PER_THREAD_LOCALE) && !defined(__WXMSW__) #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4) if (uselocale) // uselocale() is not available in Mac OS 10.3.9 #endif { uselocale(LC_GLOBAL_LOCALE); freelocale(RPC_Thread_Locale); } #endif m_pRPC_Thread_Mutex->Unlock(); // Just for safety - not really needed // Tell CMainDocument that thread has gracefully ended // We do this here because OnExit() is not called on Windows m_pDoc->m_RPCThread = NULL; return 0; } current_request = m_pDoc->GetCurrentRPCRequest(); if (!current_request->isActive) continue; // Should never happen if (current_request->RPCExecutionTime) { startTime = dtime(); } retval = ProcessRPCRequest(); if (current_request->RPCExecutionTime) { *(current_request->RPCExecutionTime) = dtime() - startTime; } current_request->retval = retval; mutexErr = m_pRPC_Request_Mutex->Lock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); current_request->isActive = false; wxPostEvent( wxTheApp, RPC_done_event ); // Signal() is ignored / discarded unless the main thread is // currently blocked by m_pRPC_Request_Condition->Wait[Timeout]() m_pRPC_Request_Condition->Signal(); mutexErr = m_pRPC_Request_Mutex->Unlock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); } return NULL; } int RPCThread::ProcessRPCRequest() { int retval = 0; ASYNC_RPC_REQUEST *current_request = m_pDoc->GetCurrentRPCRequest(); switch (current_request->which_rpc) { // RPC_SELECTORS with no arguments case RPC_RUN_BENCHMARKS: case RPC_QUIT: case RPC_NETWORK_AVAILABLE: case RPC_PROJECT_ATTACH_FROM_FILE: case RPC_READ_GLOBAL_PREFS_OVERRIDE: case RPC_READ_CC_CONFIG: break; default: // All others must have at least one argument if (current_request->arg1 == NULL) { wxASSERT(false); return -1; } break; } switch (current_request->which_rpc) { case RPC_AUTHORIZE: retval = (m_pDoc->rpcClient).authorize((const char*)(current_request->arg1)); break; case RPC_EXCHANGE_VERSIONS: retval = (m_pDoc->rpcClient).exchange_versions(*(VERSION_INFO*)(current_request->arg1)); break; case RPC_GET_STATE: retval = (m_pDoc->rpcClient).get_state(*(CC_STATE*)(current_request->arg1)); break; case RPC_GET_RESULTS: retval = (m_pDoc->rpcClient).get_results(*(RESULTS*)(current_request->arg1), *(bool*)(current_request->arg2)); break; case RPC_GET_FILE_TRANSFERS: retval = (m_pDoc->rpcClient).get_file_transfers(*(FILE_TRANSFERS*)(current_request->arg1)); break; case RPC_GET_SIMPLE_GUI_INFO1: retval = (m_pDoc->rpcClient).get_simple_gui_info(*(SIMPLE_GUI_INFO*)(current_request->arg1)); break; case RPC_GET_SIMPLE_GUI_INFO2: // RPC_GET_SIMPLE_GUI_INFO2 is equivalent to doing both // RPC_GET_PROJECT_STATUS1 and RPC_GET_RESULTS retval = (m_pDoc->rpcClient).get_results(*(RESULTS*)(current_request->arg3), *(bool*)(current_request->arg4)); if (!retval) { retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1)); } break; case RPC_GET_PROJECT_STATUS1: retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1)); break; case RPC_GET_PROJECT_STATUS2: retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1)); break; case RPC_GET_ALL_PROJECTS_LIST: retval = (m_pDoc->rpcClient).get_all_projects_list(*(ALL_PROJECTS_LIST*)(current_request->arg1)); break; case RPC_GET_DISK_USAGE: retval = (m_pDoc->rpcClient).get_disk_usage(*(DISK_USAGE*)(current_request->arg1)); break; case RPC_PROJECT_OP: retval = 
(m_pDoc->rpcClient).project_op( *(PROJECT*)(current_request->arg1), (const char*)(current_request->arg2) ); break; case RPC_SET_RUN_MODE: retval = (m_pDoc->rpcClient).set_run_mode( *(int*)(current_request->arg1), *(double*)(current_request->arg2) ); break; case RPC_SET_GPU_MODE: retval = (m_pDoc->rpcClient).set_gpu_mode( *(int*)(current_request->arg1), *(double*)(current_request->arg2) ); break; case RPC_SET_NETWORK_MODE: retval = (m_pDoc->rpcClient).set_network_mode( *(int*)(current_request->arg1), *(double*)(current_request->arg2) ); break; case RPC_GET_SCREENSAVER_TASKS: retval = (m_pDoc->rpcClient).get_screensaver_tasks( *(int*)(current_request->arg1), *(RESULTS*)(current_request->arg2) ); break; case RPC_RUN_BENCHMARKS: retval = (m_pDoc->rpcClient).run_benchmarks(); break; case RPC_SET_PROXY_SETTINGS: retval = (m_pDoc->rpcClient).set_proxy_settings(*(GR_PROXY_INFO*)(current_request->arg1)); break; case RPC_GET_PROXY_SETTINGS: retval = (m_pDoc->rpcClient).get_proxy_settings(*(GR_PROXY_INFO*)(current_request->arg1)); break; case RPC_GET_NOTICES: retval = (m_pDoc->rpcClient).get_notices( *(int*)(current_request->arg1), *(NOTICES*)(current_request->arg2) ); break; case RPC_GET_MESSAGES: retval = (m_pDoc->rpcClient).get_messages( *(int*)(current_request->arg1), *(MESSAGES*)(current_request->arg2), *(bool*)(current_request->arg3) ); break; case RPC_FILE_TRANSFER_OP: retval = (m_pDoc->rpcClient).file_transfer_op( *(FILE_TRANSFER*)(current_request->arg1), (const char*)(current_request->arg2) ); break; case RPC_RESULT_OP: retval = (m_pDoc->rpcClient).result_op( *(RESULT*)(current_request->arg1), (const char*)(current_request->arg2) ); break; case RPC_GET_HOST_INFO: retval = (m_pDoc->rpcClient).get_host_info(*(HOST_INFO*)(current_request->arg1)); break; case RPC_QUIT: retval = (m_pDoc->rpcClient).quit(); break; case RPC_ACCT_MGR_INFO: retval = (m_pDoc->rpcClient).acct_mgr_info(*(ACCT_MGR_INFO*)(current_request->arg1)); break; case RPC_GET_STATISTICS: retval = (m_pDoc->rpcClient).get_statistics(*(PROJECTS*)(current_request->arg1)); break; case RPC_NETWORK_AVAILABLE: retval = (m_pDoc->rpcClient).network_available(); break; case RPC_GET_PROJECT_INIT_STATUS: retval = (m_pDoc->rpcClient).get_project_init_status(*(PROJECT_INIT_STATUS*)(current_request->arg1)); break; case RPC_GET_PROJECT_CONFIG: retval = (m_pDoc->rpcClient).get_project_config(*(std::string*)(current_request->arg1)); break; case RPC_GET_PROJECT_CONFIG_POLL: retval = (m_pDoc->rpcClient).get_project_config_poll(*(PROJECT_CONFIG*)(current_request->arg1)); break; case RPC_LOOKUP_ACCOUNT: retval = (m_pDoc->rpcClient).lookup_account(*(ACCOUNT_IN*)(current_request->arg1)); break; case RPC_LOOKUP_ACCOUNT_POLL: retval = (m_pDoc->rpcClient).lookup_account_poll(*(ACCOUNT_OUT*)(current_request->arg1)); break; case RPC_CREATE_ACCOUNT: retval = (m_pDoc->rpcClient).create_account(*(ACCOUNT_IN*)(current_request->arg1)); break; case RPC_CREATE_ACCOUNT_POLL: retval = (m_pDoc->rpcClient).create_account_poll(*(ACCOUNT_OUT*)(current_request->arg1)); break; case RPC_PROJECT_ATTACH: retval = (m_pDoc->rpcClient).project_attach( (const char*)(current_request->arg1), (const char*)(current_request->arg2), (const char*)(current_request->arg3) ); break; case RPC_PROJECT_ATTACH_FROM_FILE: retval = (m_pDoc->rpcClient).project_attach_from_file(); break; case RPC_PROJECT_ATTACH_POLL: retval = (m_pDoc->rpcClient).project_attach_poll(*(PROJECT_ATTACH_REPLY*)(current_request->arg1)); break; case RPC_ACCT_MGR_RPC: retval = (m_pDoc->rpcClient).acct_mgr_rpc( (const 
char*)(current_request->arg1), (const char*)(current_request->arg2), (const char*)(current_request->arg3), (bool)(current_request->arg4 != NULL) ); break; case RPC_ACCT_MGR_RPC_POLL: retval = (m_pDoc->rpcClient).acct_mgr_rpc_poll(*(ACCT_MGR_RPC_REPLY*)(current_request->arg1)); break; case RPC_GET_NEWER_VERSION: retval = (m_pDoc->rpcClient).get_newer_version( *(std::string*)(current_request->arg1), *(std::string*)(current_request->arg2) ); break; case RPC_READ_GLOBAL_PREFS_OVERRIDE: retval = (m_pDoc->rpcClient).read_global_prefs_override(); break; case RPC_READ_CC_CONFIG: retval = (m_pDoc->rpcClient).read_cc_config(); break; case RPC_GET_CC_STATUS: retval = (m_pDoc->rpcClient).get_cc_status(*(CC_STATUS*)(current_request->arg1)); break; case RPC_GET_GLOBAL_PREFS_FILE: retval = (m_pDoc->rpcClient).get_global_prefs_file(*(std::string*)(current_request->arg1)); break; case RPC_GET_GLOBAL_PREFS_WORKING: retval = (m_pDoc->rpcClient).get_global_prefs_working(*(std::string*)(current_request->arg1)); break; case RPC_GET_GLOBAL_PREFS_WORKING_STRUCT: retval = (m_pDoc->rpcClient).get_global_prefs_working_struct( *(GLOBAL_PREFS*)(current_request->arg1), *(GLOBAL_PREFS_MASK*)(current_request->arg2) ); break; case RPC_GET_GLOBAL_PREFS_OVERRIDE: retval = (m_pDoc->rpcClient).get_global_prefs_override(*(std::string*)(current_request->arg1)); break; case RPC_SET_GLOBAL_PREFS_OVERRIDE: retval = (m_pDoc->rpcClient).set_global_prefs_override(*(std::string*)(current_request->arg1)); break; case RPC_GET_GLOBAL_PREFS_OVERRIDE_STRUCT: retval = (m_pDoc->rpcClient).get_global_prefs_override_struct( *(GLOBAL_PREFS*)(current_request->arg1), *(GLOBAL_PREFS_MASK*)(current_request->arg2) ); break; case RPC_SET_GLOBAL_PREFS_OVERRIDE_STRUCT: retval = (m_pDoc->rpcClient).set_global_prefs_override_struct( *(GLOBAL_PREFS*)(current_request->arg1), *(GLOBAL_PREFS_MASK*)(current_request->arg2) ); break; case RPC_GET_CC_CONFIG: retval = (m_pDoc->rpcClient).get_cc_config( *(CC_CONFIG*)(current_request->arg1), *(LOG_FLAGS*)(current_request->arg2) ); break; case RPC_SET_CC_CONFIG: retval = (m_pDoc->rpcClient).set_cc_config( *(CC_CONFIG*)(current_request->arg1), *(LOG_FLAGS*)(current_request->arg2) ); break; case RPC_SET_LANGUAGE: retval = (m_pDoc->rpcClient).set_language( (const char*)(current_request->arg1) ); break; default: break; } return retval; } // TODO: combine RPC requests for different buffers, then just copy the buffer. int CMainDocument::RequestRPC(ASYNC_RPC_REQUEST& request, bool hasPriority) { std::vector<ASYNC_RPC_REQUEST>::iterator iter; int retval = 0; int response = wxID_OK; wxMutexError mutexErr = wxMUTEX_NO_ERROR; long delayTimeRemaining, timeToSleep; bool shown = false; if (!m_RPCThread) return -1; if ( (request.rpcType < RPC_TYPE_WAIT_FOR_COMPLETION) || (request.rpcType >= NUM_RPC_TYPES) ) { wxASSERT(false); return -1; } // If we are quitting, cancel any pending RPCs if (request.which_rpc == RPC_QUIT) { if (current_rpc_request.isActive) { RPC_requests.erase(RPC_requests.begin()+1, RPC_requests.end()); } else { RPC_requests.clear(); } } // Check if a duplicate request is already on the queue for (iter=RPC_requests.begin(); iter!=RPC_requests.end(); ++iter) { if (iter->isSameAs(request)) { return 0; } } if ((request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) && (request.resultPtr == NULL)) { request.resultPtr = &retval; } if (hasPriority) { // We may want to set hasPriority for some user-initiated events. // Since the user is waiting, insert this at head of request queue. 
// As of 8/14/08, hasPriority is never set true, so hasn't been tested. iter = RPC_requests.insert(RPC_requests.begin(), request); } else { RPC_requests.push_back(request); } // Start this RPC if no other RPC is already in progress. if (RPC_requests.size() == 1) { // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait() mutexErr = m_pRPC_Thread_Mutex->Lock(); // Blocks until thread unlocks the mutex wxASSERT(mutexErr == wxMUTEX_NO_ERROR); // Make sure activation is an atomic operation request.isActive = false; current_rpc_request = request; current_rpc_request.isActive = true; m_pRPC_Thread_Condition->Signal(); // Unblock the thread // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(), // causing it to block again if we still have our lock on the mutex. mutexErr = m_pRPC_Thread_Mutex->Unlock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); } // If this is a user-initiated event wait for completion but show // a dialog allowing the user to cancel. if (request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) { // TODO: proper handling if a second user request is received while first is pending ?? if (m_bWaitingForRPC) { wxLogMessage(wxT("Second user RPC request while another was pending")); wxASSERT(false); return -1; } // Don't show dialog if RPC completes before RPC_WAIT_DLG_DELAY // or while BOINC is minimized CBOINCBaseFrame* pFrame = wxGetApp().GetFrame(); wxStopWatch Dlgdelay = wxStopWatch(); m_RPCWaitDlg = new AsyncRPCDlg(); m_bWaitingForRPC = true; // Allow RPC_WAIT_DLG_DELAY seconds for Demand RPC to complete before // displaying "Please Wait" dialog, but keep checking for completion. delayTimeRemaining = RPC_WAIT_DLG_DELAY; while (true) { if (delayTimeRemaining >= 0) { // Prevent overflow if minimized for a very long time delayTimeRemaining = RPC_WAIT_DLG_DELAY - Dlgdelay.Time(); } if (pFrame) { shown = pFrame->IsShown(); } else { shown = false; } if (shown) { if (delayTimeRemaining <= 0) break; // Display the Please Wait dialog timeToSleep = delayTimeRemaining; } else { // Don't show dialog while Manager is minimized, but do // process events so user can maximize the manager. // // NOTE: CBOINCGUIApp::FilterEvent() discards those events // which might cause posting of more RPC requests while // we are in this loop, to prevent undesirable recursion. // Since the manager is minimized, we don't have to worry about // discarding crucial drawing or command events. // The filter does allow the the Open Manager menu item from // the system tray icon and wxEVT_RPC_FINISHED event. // timeToSleep = DELAY_WHEN_MINIMIZED; // Allow user to maximize Manager wxSafeYield(NULL, true); } // OnRPCComplete() clears m_bWaitingForRPC if RPC completed if (! m_bWaitingForRPC) { return retval; } mutexErr = m_pRPC_Request_Mutex->Lock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); // Simulate handling of CRPCFinishedEvent but don't allow any other // events (so no user activity) to prevent undesirable recursion. // Since we don't need to filter and discard events, they remain on // the queue until it is safe to process them. // Allow RPC thread to run while we wait for it. if (!current_rpc_request.isActive) { mutexErr = m_pRPC_Request_Mutex->Unlock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); HandleCompletedRPC(); continue; } // Wait for RPC thread to wake us // This does the following: // (1) Unlocks the Mutex and puts the main thread to sleep as an atomic operation. // (2) On Signal from RPC thread: locks Mutex again and wakes the main thread. 
m_pRPC_Request_Condition->WaitTimeout(timeToSleep); mutexErr = m_pRPC_Request_Mutex->Unlock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); } // Demand RPC has taken longer than RPC_WAIT_DLG_DELAY seconds and // Manager is not minimized, so display the "Please Wait" dialog // with a Cancel button. If the RPC does complete while the dialog // is up, HandleCompletedRPC() will call EndModal with wxID_OK. // // NOTE: the Modal dialog permits processing of all events, but // CBOINCGUIApp::FilterEvent() blocks those events which might cause // posting of more RPC requests while in this dialog, to prevent // undesirable recursion. // if (m_RPCWaitDlg) { response = m_RPCWaitDlg->ShowModal(); // Remember time the dialog was closed for use by RunPeriodicRPCs() m_dtLasAsyncRPCDlgTime = wxDateTime::Now(); if (response != wxID_OK) { // TODO: If user presses Cancel in Please Wait dialog but request // has not yet been started, should we just remove it from queue? // If we make that change, should we also add a separate menu item // to reset the RPC connection (or does one already exist)? retval = -1; // If the RPC continues to get data after we return to // our caller, it may try to write into a buffer or struct // which the caller has already deleted. To prevent this, // we close the socket (disconnect) and kill the RPC thread. // This is ugly but necessary. We must then reconnect and // start a new RPC thread. if (current_rpc_request.isActive) { current_rpc_request.isActive = false; rpcClient.close(); RPC_requests.clear(); current_rpc_request.clear(); m_bNeedRefresh = false; m_bNeedTaskBarRefresh = false; // We will be reconnected to the same client (if possible) by // CBOINCDialUpManager::OnPoll() and CNetworkConnection::Poll(). m_pNetworkConnection->SetStateDisconnected(); } if (response == wxID_EXIT) { pFrame = wxGetApp().GetFrame(); wxCommandEvent evt(wxEVT_COMMAND_MENU_SELECTED, wxID_EXIT); s_bSkipExitConfirmation = true; pFrame->GetEventHandler()->AddPendingEvent(evt); } } if (m_RPCWaitDlg) { m_RPCWaitDlg->Destroy(); } m_RPCWaitDlg = NULL; m_bWaitingForRPC = false; } } return retval; } void CMainDocument::KillRPCThread() { wxMutexError mutexErr = wxMUTEX_NO_ERROR; int i; if (!m_RPCThread) { return; } m_bNeedRefresh = false; m_bNeedTaskBarRefresh = false; rpcClient.close(); // Abort any async RPC in progress (in case hung) // On some platforms, Delete() takes effect only when thread calls TestDestroy() // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait() mutexErr = m_pRPC_Thread_Mutex->Lock(); // Blocks until thread unlocks the mutex wxASSERT(mutexErr == wxMUTEX_NO_ERROR); m_bShutDownRPCThread = true; m_pRPC_Thread_Condition->Signal(); // Unblock the thread mutexErr = m_pRPC_Thread_Mutex->Unlock(); // Release the mutex so thread can lock it wxASSERT(mutexErr == wxMUTEX_NO_ERROR); RPC_requests.clear(); current_rpc_request.clear(); // Wait up to RPC_KILL_DELAY milliseconds for thread to exit on its own for (i=0; i< RPC_KILL_DELAY; ++i) { boinc_sleep(.001); // Defer to RPC thread for 1 millisecond if (!m_RPCThread) { return; // RPC thread sets m_RPCThread to NULL when it exits } } // Thread failed to exit, so forcefully kill it m_RPCThread->Kill(); } void CMainDocument::OnRPCComplete(CRPCFinishedEvent&) { HandleCompletedRPC(); } void CMainDocument::HandleCompletedRPC() { int retval = 0; wxMutexError mutexErr = wxMUTEX_NO_ERROR; int i, n, requestIndex = -1; bool stillWaitingForPendingRequests = false; if (!m_RPCThread) return; if (current_rpc_request.isActive) return; // We can get here 
either via a CRPCFinishedEvent event posted // by the RPC thread or by a call from RequestRPC. If we were // called from RequestRPC, the CRPCFinishedEvent will still be // on the event queue, so we get called twice. Check for this here. if (current_rpc_request.which_rpc == 0) return; // already handled by a call from RequestRPC // Find our completed request in the queue n = (int) RPC_requests.size(); for (i=0; i<n; ++i) { if (RPC_requests[i].isSameAs(current_rpc_request)) { requestIndex = i; } else { if (RPC_requests[i].rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) { stillWaitingForPendingRequests = true; } } } if (! stillWaitingForPendingRequests) { if (m_RPCWaitDlg) { if (m_RPCWaitDlg->IsShown()) { m_RPCWaitDlg->EndModal(wxID_OK); } m_RPCWaitDlg->Destroy(); m_RPCWaitDlg = NULL; } m_bWaitingForRPC = false; } if (requestIndex >= 0) { // Remove completed request from the queue RPC_requests.erase(RPC_requests.begin()+requestIndex); } retval = current_rpc_request.retval; if (current_rpc_request.completionTime) { *(current_rpc_request.completionTime) = wxDateTime::Now(); } if (current_rpc_request.resultPtr) { *(current_rpc_request.resultPtr) = retval; } // Post-processing if (! retval) { if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_AFTER) { if (!retval) { m_bNeedRefresh = true; } } if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_UPDATE_TASKBAR_ICON_AFTER) { if (!retval) { m_bNeedTaskBarRefresh = true; } } switch (current_rpc_request.which_rpc) { case RPC_GET_STATE: if (current_rpc_request.exchangeBuf && !retval) { CC_STATE* arg1 = (CC_STATE*)current_rpc_request.arg1; CC_STATE* exchangeBuf = (CC_STATE*)current_rpc_request.exchangeBuf; arg1->projects.swap(exchangeBuf->projects); arg1->apps.swap(exchangeBuf->apps); arg1->app_versions.swap(exchangeBuf->app_versions); arg1->wus.swap(exchangeBuf->wus); arg1->results.swap(exchangeBuf->results); exchangeBuf->global_prefs = arg1->global_prefs; exchangeBuf->version_info = arg1->version_info; exchangeBuf->executing_as_daemon = arg1->executing_as_daemon; exchangeBuf->host_info = arg1->host_info; exchangeBuf->time_stats = arg1->time_stats; exchangeBuf->have_nvidia = arg1->have_nvidia; exchangeBuf->have_ati = arg1->have_ati; } break; case RPC_GET_RESULTS: if (current_rpc_request.exchangeBuf && !retval) { RESULTS* arg1 = (RESULTS*)current_rpc_request.arg1; RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf; arg1->results.swap(exchangeBuf->results); } break; case RPC_GET_FILE_TRANSFERS: if (current_rpc_request.exchangeBuf && !retval) { FILE_TRANSFERS* arg1 = (FILE_TRANSFERS*)current_rpc_request.arg1; FILE_TRANSFERS* exchangeBuf = (FILE_TRANSFERS*)current_rpc_request.exchangeBuf; arg1->file_transfers.swap(exchangeBuf->file_transfers); } break; case RPC_GET_SIMPLE_GUI_INFO2: if (!retval) { retval = CopyProjectsToStateBuffer(*(PROJECTS*)(current_rpc_request.arg1), *(CC_STATE*)(current_rpc_request.arg2)); } if (current_rpc_request.exchangeBuf && !retval) { RESULTS* arg3 = (RESULTS*)current_rpc_request.arg3; RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf; arg3->results.swap(exchangeBuf->results); } break; case RPC_GET_PROJECT_STATUS1: if (!retval) { retval = CopyProjectsToStateBuffer(*(PROJECTS*)(current_rpc_request.arg1), *(CC_STATE*)(current_rpc_request.arg2)); } break; case RPC_GET_ALL_PROJECTS_LIST: if (current_rpc_request.exchangeBuf && !retval) { ALL_PROJECTS_LIST* arg1 = (ALL_PROJECTS_LIST*)current_rpc_request.arg1; ALL_PROJECTS_LIST* exchangeBuf = (ALL_PROJECTS_LIST*)current_rpc_request.exchangeBuf; 
arg1->projects.swap(exchangeBuf->projects); } break; case RPC_GET_DISK_USAGE: if (current_rpc_request.exchangeBuf && !retval) { DISK_USAGE* arg1 = (DISK_USAGE*)current_rpc_request.arg1; DISK_USAGE* exchangeBuf = (DISK_USAGE*)current_rpc_request.exchangeBuf; arg1->projects.swap(exchangeBuf->projects); exchangeBuf->d_total = arg1->d_total; exchangeBuf->d_free = arg1->d_free; exchangeBuf->d_boinc = arg1->d_boinc; exchangeBuf->d_allowed = arg1->d_allowed; } break; case RPC_GET_NOTICES: if (current_rpc_request.exchangeBuf && !retval) { NOTICES* arg2 = (NOTICES*)current_rpc_request.arg2; NOTICES* exchangeBuf = (NOTICES*)current_rpc_request.exchangeBuf; arg2->notices.swap(exchangeBuf->notices); } if (!retval) { CachedNoticeUpdate(); // Call this only when notice buffer is stable } m_bWaitingForGetNoticesRPC = false; break; case RPC_GET_MESSAGES: if (current_rpc_request.exchangeBuf && !retval) { MESSAGES* arg2 = (MESSAGES*)current_rpc_request.arg2; MESSAGES* exchangeBuf = (MESSAGES*)current_rpc_request.exchangeBuf; arg2->messages.swap(exchangeBuf->messages); } if (!retval) { CachedMessageUpdate(); // Call this only when message buffer is stable } break; case RPC_GET_HOST_INFO: if (current_rpc_request.exchangeBuf && !retval) { HOST_INFO* arg1 = (HOST_INFO*)current_rpc_request.arg1; HOST_INFO* exchangeBuf = (HOST_INFO*)current_rpc_request.exchangeBuf; *exchangeBuf = *arg1; } break; case RPC_GET_STATISTICS: if (current_rpc_request.exchangeBuf && !retval) { PROJECTS* arg1 = (PROJECTS*)current_rpc_request.arg1; PROJECTS* exchangeBuf = (PROJECTS*)current_rpc_request.exchangeBuf; arg1->projects.swap(exchangeBuf->projects); } break; case RPC_GET_CC_STATUS: if (current_rpc_request.exchangeBuf && !retval) { CC_STATUS* arg1 = (CC_STATUS*)current_rpc_request.arg1; CC_STATUS* exchangeBuf = (CC_STATUS*)current_rpc_request.exchangeBuf; *exchangeBuf = *arg1; } break; case RPC_ACCT_MGR_INFO: if (current_rpc_request.exchangeBuf && !retval) { ACCT_MGR_INFO* arg1 = (ACCT_MGR_INFO*)current_rpc_request.arg1; ACCT_MGR_INFO* exchangeBuf = (ACCT_MGR_INFO*)current_rpc_request.exchangeBuf; *exchangeBuf = *arg1; } break; default: // We don't support double buffering for other RPC calls wxASSERT(current_rpc_request.exchangeBuf == NULL); break; } } if (current_rpc_request.resultPtr) { // In case post-processing changed retval *(current_rpc_request.resultPtr) = retval; } // We must call ProcessEvent() rather than AddPendingEvent() here to // guarantee integrity of data when other events are handled (such as // Abort, Suspend/Resume, Show Graphics, Update, Detach, Reset, No // New Work, etc.) Otherwise, if one of those events is pending it // might be processed first, and the data in the selected rows may not // match the data which the user selected if any rows were added or // deleted due to the RPC. // The refresh event called here adjusts the selections to fix any // such mismatch before other pending events are processed. // // However, the refresh code may itself request a Demand RPC, which // would cause undesirable recursion if we are already waiting for // another Demand RPC to complete. In that case, we defer the refresh // until all pending Demand RPCs have been done. // if (m_bNeedRefresh && !m_bWaitingForRPC) { m_bNeedRefresh = false; // We must get the frame immediately before using it, // since it may have been changed by SetActiveGUI(). 
CBOINCBaseFrame* pFrame = wxGetApp().GetFrame(); if (pFrame) { CFrameEvent event(wxEVT_FRAME_REFRESHVIEW, pFrame); pFrame->GetEventHandler()->ProcessEvent(event); } } if (m_bNeedTaskBarRefresh && !m_bWaitingForRPC) { m_bNeedTaskBarRefresh = false; CTaskBarIcon* pTaskbar = wxGetApp().GetTaskBarIcon(); if (pTaskbar) { CTaskbarEvent event(wxEVT_TASKBAR_REFRESH, pTaskbar); pTaskbar->ProcessEvent(event); } } if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_EVENT_LOG_AFTER) { CDlgEventLog* eventLog = wxGetApp().GetEventLog(); if (eventLog) { eventLog->OnRefresh(); } } current_rpc_request.clear(); // Start the next RPC request. // We can't start this until finished processing the previous RPC's // event because the two requests may write into the same buffer. if (RPC_requests.size() > 0) { // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait() mutexErr = m_pRPC_Thread_Mutex->Lock(); // Blocks until thread unlocks the mutex wxASSERT(mutexErr == wxMUTEX_NO_ERROR); // Make sure activation is an atomic operation RPC_requests[0].isActive = false; current_rpc_request = RPC_requests[0]; current_rpc_request.isActive = true; m_pRPC_Thread_Condition->Signal(); // Unblock the thread // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(), // causing it to block again if we still have our lock on the mutex. mutexErr = m_pRPC_Thread_Mutex->Unlock(); wxASSERT(mutexErr == wxMUTEX_NO_ERROR); } } int CMainDocument::CopyProjectsToStateBuffer(PROJECTS& p, CC_STATE& ccstate) { int retval = 0; unsigned int i; PROJECT* state_project = NULL; // flag for delete for (i=0; i<ccstate.projects.size(); i++) { state_project = ccstate.projects[i]; state_project->flag_for_delete = true; } for (i=0; i<p.projects.size(); i++) { state_project = ccstate.lookup_project(p.projects[i]->master_url); if (state_project && (!strcmp(p.projects[i]->master_url, state_project->master_url))) { // Because the CC_STATE contains several pointers to each element of the // CC_STATE::projects vector, we must update these elements in place. *state_project = *(p.projects[i]); state_project->flag_for_delete = false; } else { retval = ERR_NOT_FOUND; } continue; } // Anything need to be deleted? if (!retval) { for (i=0; i<ccstate.projects.size(); i++) { state_project = ccstate.projects[i]; if (state_project->flag_for_delete) { retval = ERR_FILE_MISSING; } } } return retval; } BEGIN_EVENT_TABLE(AsyncRPCDlg, wxDialog) EVT_BUTTON(wxID_EXIT, AsyncRPCDlg::OnExit) END_EVENT_TABLE() IMPLEMENT_CLASS(AsyncRPCDlg, wxDialog) AsyncRPCDlg::AsyncRPCDlg() : wxDialog( NULL, wxID_ANY, wxT(""), wxDefaultPosition ) { CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced(); wxString exit_label; wxASSERT(pSkinAdvanced); wxString message = wxString(_("Communicating with BOINC client. 
Please wait ...")); #ifdef __WXMAC__ exit_label.Printf(_("&Quit %s"), pSkinAdvanced->GetApplicationName().c_str()); #else exit_label.Printf(_("E&xit %s"), pSkinAdvanced->GetApplicationName().c_str()); #endif wxString strCaption; strCaption.Printf(_("%s - Communication"), pSkinAdvanced->GetApplicationName().c_str()); SetTitle(strCaption.c_str()); wxBoxSizer *topsizer = new wxBoxSizer( wxVERTICAL ); wxBoxSizer *icon_text = new wxBoxSizer( wxHORIZONTAL ); icon_text->Add( CreateTextSizer( message ), 0, wxALIGN_CENTER | wxLEFT, 10 ); topsizer->Add( icon_text, 1, wxCENTER | wxLEFT|wxRIGHT|wxTOP, 10 ); wxStdDialogButtonSizer *sizerBtn = CreateStdDialogButtonSizer(0); wxButton* exitbutton = new wxButton; exitbutton->Create( this, wxID_EXIT, exit_label, wxDefaultPosition, wxDefaultSize, 0 ); sizerBtn->Add(exitbutton, 0, wxLEFT|wxRIGHT|wxALL, 5); wxButton* cancelbutton = new wxButton; cancelbutton->Create( this, wxID_CANCEL, _("Cancel"), wxDefaultPosition, wxDefaultSize, 0 ); sizerBtn->Add(cancelbutton, 0, wxLEFT|wxRIGHT|wxALL, 5); if ( sizerBtn ) topsizer->Add(sizerBtn, 0, wxEXPAND | wxALL, 10 ); SetAutoLayout( true ); SetSizer( topsizer ); topsizer->SetSizeHints( this ); topsizer->Fit( this ); wxSize size( GetSize() ); if (size.x < size.y*3/2) { size.x = size.y*3/2; SetSize( size ); } Centre( wxBOTH | wxCENTER_FRAME); } void AsyncRPCDlg::OnExit(wxCommandEvent& WXUNUSED(eventUnused)) { EndModal(wxID_EXIT); } #if 0 /// For testing: triggered by Advanced / Options menu item. void CMainDocument::TestAsyncRPC() { ALL_PROJECTS_LIST pl; ASYNC_RPC_REQUEST request; wxDateTime completionTime = wxDateTime((time_t)0); int req_retval = 0, rpc_result = 0; completionTime.ResetTime(); request.which_rpc = RPC_GET_ALL_PROJECTS_LIST; request.arg1 = &pl; request.exchangeBuf = NULL; request.arg2 = NULL; request.arg3 = NULL; request.arg4 = NULL; request.rpcType = RPC_TYPE_WAIT_FOR_COMPLETION; request.completionTime = &completionTime; // request.result = NULL; request.resultPtr = &rpc_result; // For testing async RPCs request.isActive = false; //retval = rpcClient.get_all_projects_list(pl); req_retval = RequestRPC(request, true); wxString s = FormatTime(); wxLogMessage(wxT("Completion time = %s"), s.c_str()); wxLogMessage(wxT("RequestRPC returned %d\n"), req_retval); ::wxSafeYield(NULL, true); // Allow processing of RPC_FINISHED event wxLogMessage(wxT("rpcClient.get_all_projects_list returned %d\n"), rpc_result); } #endif
1
8,569
This is wrong. xlocale.h is needed on OS X and maybe elsewhere too. This needs a configure test to see whether xlocale.h and uselocale() are available, and an #if guard here. Also, the NO_PER_THREAD_LOCALE handling in configure.ac needs to be updated to include a test for uselocale(). (A rough sketch follows this record.)
BOINC-boinc
php
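A minimal sketch of the guard the reviewer is asking for, assuming hypothetical HAVE_XLOCALE_H and HAVE_USELOCALE macros that the suggested configure.ac checks (e.g. AC_CHECK_HEADERS / AC_CHECK_FUNCS) would define; the macro names and the surrounding BOINC build machinery are assumptions, not part of this record.

#include <locale.h>           // uselocale()/freelocale() live here on glibc
#if defined(HAVE_XLOCALE_H)   // hypothetical macro from a configure header check
#include <xlocale.h>          // needed on OS X, and possibly elsewhere
#endif

// Cleanup the RPC thread would perform before exiting, guarded by feature
// detection rather than by platform-specific NO_PER_THREAD_LOCALE logic alone.
static void restore_global_locale(locale_t per_thread_locale) {
#if defined(HAVE_USELOCALE) && !defined(NO_PER_THREAD_LOCALE)
    uselocale(LC_GLOBAL_LOCALE);    // switch this thread back to the global locale
    freelocale(per_thread_locale);  // release the per-thread locale object
#else
    (void)per_thread_locale;        // no per-thread locale support detected
#endif
}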
@@ -1,4 +1,4 @@ -//snippet-sourcedescription:[DeleteAccessKey.java demonstrates how to delete an access key from an AWS Identity and Access Management (IAM) user.] +//snippet-sourcedescription:[DeleteAccessKey.java demonstrates how to delete an access key from an AWS Identity and Access Management (AWS IAM) user.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM]
1
//snippet-sourcedescription:[DeleteAccessKey.java demonstrates how to delete an access key from an AWS Identity and Access Management (IAM) user.] //snippet-keyword:[AWS SDK for Java v2] //snippet-keyword:[Code Sample] //snippet-service:[AWS IAM] //snippet-sourcetype:[full-example] //snippet-sourcedate:[11/02/2020] //snippet-sourceauthor:[scmacdon-aws] /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package com.example.iam; // snippet-start:[iam.java2.delete_access_key.import] import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.iam.IamClient; import software.amazon.awssdk.services.iam.model.DeleteAccessKeyRequest; import software.amazon.awssdk.services.iam.model.IamException; // snippet-end:[iam.java2.delete_access_key.import] public class DeleteAccessKey { public static void main(String[] args) { final String USAGE = "\n" + "Usage:\n" + " DeleteAccessKey <username> <accessKey> \n\n" + "Where:\n" + " username - the name of the user. \n\n" + " accessKey - the access key ID for the secret access key you want to delete. \n\n" ; if (args.length != 2) { System.out.println(USAGE); System.exit(1); } // Read the command line arguments String username = args[0]; String accessKey = args[1]; Region region = Region.AWS_GLOBAL; IamClient iam = IamClient.builder() .region(region) .build(); deleteKey(iam, username, accessKey); iam.close(); } // snippet-start:[iam.java2.delete_access_key.main] public static void deleteKey(IamClient iam ,String username, String accessKey ) { try { DeleteAccessKeyRequest request = DeleteAccessKeyRequest.builder() .accessKeyId(accessKey) .userName(username) .build(); iam.deleteAccessKey(request); System.out.println("Successfully deleted access key " + accessKey + " from user " + username); } catch (IamException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } // snippet-end:[iam.java2.delete_access_key.main] }
1
18,235
AWS Identity and Access Management (IAM)
awsdocs-aws-doc-sdk-examples
rb
@@ -138,7 +138,7 @@ utils.imageReplacement = function(ResourceModel, src, resources, resourceBaseUrl } const mime = resource.mime ? resource.mime.toLowerCase() : ''; - if (ResourceModel.isSupportedImageMimeType(mime)) { + if (ResourceModel.isSupportedImageMimeType(mime) || ResourceModel.isSupportedAudioMimeType(mime)) { let newSrc = `./${ResourceModel.filename(resource)}`; if (resourceBaseUrl) newSrc = resourceBaseUrl + newSrc; newSrc += `?t=${resource.updated_time}`;
1
const Entities = require('html-entities').AllHtmlEntities; const htmlentities = new Entities().encode; // Imported from models/Resource.js const FetchStatuses = { FETCH_STATUS_IDLE: 0, FETCH_STATUS_STARTED: 1, FETCH_STATUS_DONE: 2, FETCH_STATUS_ERROR: 3, }; const utils = {}; utils.getAttr = function(attrs, name, defaultValue = null) { for (let i = 0; i < attrs.length; i++) { if (attrs[i][0] === name) return attrs[i].length > 1 ? attrs[i][1] : null; } return defaultValue; }; utils.notDownloadedResource = function() { return ` <svg width="1700" height="1536" xmlns="http://www.w3.org/2000/svg"> <path d="M1280 1344c0-35-29-64-64-64s-64 29-64 64 29 64 64 64 64-29 64-64zm256 0c0-35-29-64-64-64s-64 29-64 64 29 64 64 64 64-29 64-64zm128-224v320c0 53-43 96-96 96H96c-53 0-96-43-96-96v-320c0-53 43-96 96-96h465l135 136c37 36 85 56 136 56s99-20 136-56l136-136h464c53 0 96 43 96 96zm-325-569c10 24 5 52-14 70l-448 448c-12 13-29 19-45 19s-33-6-45-19L339 621c-19-18-24-46-14-70 10-23 33-39 59-39h256V64c0-35 29-64 64-64h256c35 0 64 29 64 64v448h256c26 0 49 16 59 39z"/> </svg> `; }; utils.notDownloadedImage = function() { // https://github.com/ForkAwesome/Fork-Awesome/blob/master/src/icons/svg/file-image-o.svg // Height changed to 1795 return ` <svg width="1925" height="1792" xmlns="http://www.w3.org/2000/svg"> <path d="M640 576c0 106-86 192-192 192s-192-86-192-192 86-192 192-192 192 86 192 192zm1024 384v448H256v-192l320-320 160 160 512-512zm96-704H160c-17 0-32 15-32 32v1216c0 17 15 32 32 32h1600c17 0 32-15 32-32V288c0-17-15-32-32-32zm160 32v1216c0 88-72 160-160 160H160c-88 0-160-72-160-160V288c0-88 72-160 160-160h1600c88 0 160 72 160 160z"/> </svg> `; }; utils.notDownloadedFile = function() { // https://github.com/ForkAwesome/Fork-Awesome/blob/master/src/icons/svg/file-o.svg return ` <svg width="1925" height="1792" xmlns="http://www.w3.org/2000/svg"> <path d="M1468 380c37 37 68 111 68 164v1152c0 53-43 96-96 96H96c-53 0-96-43-96-96V96C0 43 43 0 96 0h896c53 0 127 31 164 68zm-444-244v376h376c-6-17-15-34-22-41l-313-313c-7-7-24-16-41-22zm384 1528V640H992c-53 0-96-43-96-96V128H128v1536h1280z"/> </svg> `; }; utils.errorImage = function() { // https://github.com/ForkAwesome/Fork-Awesome/blob/master/src/icons/svg/times-circle.svg return ` <svg width="1795" height="1795" xmlns="http://www.w3.org/2000/svg"> <path d="M1149 1122c0-17-7-33-19-45L949 896l181-181c12-12 19-28 19-45s-7-34-19-46l-90-90c-12-12-29-19-46-19s-33 7-45 19L768 715 587 534c-12-12-28-19-45-19s-34 7-46 19l-90 90c-12 12-19 29-19 46s7 33 19 45l181 181-181 181c-12 12-19 28-19 45s7 34 19 46l90 90c12 12 29 19 46 19s33-7 45-19l181-181 181 181c12 12 28 19 45 19s34-7 46-19l90-90c12-12 19-29 19-46zm387-226c0 424-344 768-768 768S0 1320 0 896s344-768 768-768 768 344 768 768z"/> </svg> `; }; utils.loaderImage = function() { // https://github.com/ForkAwesome/Fork-Awesome/blob/master/src/icons/svg/hourglass-half.svg return ` <svg width="1536" height="1790" xmlns="http://www.w3.org/2000/svg"> <path d="M1408 128c0 370-177 638-373 768 196 130 373 398 373 768h96c18 0 32 14 32 32v64c0 18-14 32-32 32H32c-18 0-32-14-32-32v-64c0-18 14-32 32-32h96c0-370 177-638 373-768-196-130-373-398-373-768H32c-18 0-32-14-32-32V32C0 14 14 0 32 0h1472c18 0 32 14 32 32v64c0 18-14 32-32 32h-96zm-128 0H256c0 146 33 275 85 384h854c52-109 85-238 85-384zm-57 1216c-74-193-207-330-340-384H653c-133 54-266 191-340 384h910z"/> </svg> `; }; utils.resourceStatusImage = function(status) { if (status === 'notDownloaded') return utils.notDownloadedResource(); return utils.resourceStatusFile(status); }; 
utils.resourceStatusFile = function(status) { if (status === 'notDownloaded') return utils.notDownloadedResource(); if (status === 'downloading') return utils.loaderImage(); if (status === 'encrypted') return utils.loaderImage(); if (status === 'error') return utils.errorImage(); throw new Error(`Unknown status: ${status}`); }; utils.resourceStatusIndex = function(status) { if (status === 'error') return -1; if (status === 'notDownloaded') return 0; if (status === 'downloading') return 1; if (status === 'encrypted') return 2; if (status === 'ready') return 10; throw new Error(`Unknown status: ${status}`); }; utils.resourceStatusName = function(index) { if (index === -1) return 'error'; if (index === 0) return 'notDownloaded'; if (index === 1) return 'downloading'; if (index === 2) return 'encrypted'; if (index === 10) return 'ready'; throw new Error(`Unknown index: ${index}`); }; utils.resourceStatus = function(ResourceModel, resourceInfo) { if (!ResourceModel) return 'ready'; let resourceStatus = 'ready'; if (resourceInfo) { const resource = resourceInfo.item; const localState = resourceInfo.localState; if (localState.fetch_status === FetchStatuses.FETCH_STATUS_IDLE) { resourceStatus = 'notDownloaded'; } else if (localState.fetch_status === FetchStatuses.FETCH_STATUS_STARTED) { resourceStatus = 'downloading'; } else if (localState.fetch_status === FetchStatuses.FETCH_STATUS_DONE) { if (resource.encryption_blob_encrypted || resource.encryption_applied) { resourceStatus = 'encrypted'; } } } else { resourceStatus = 'notDownloaded'; } return resourceStatus; }; utils.imageReplacement = function(ResourceModel, src, resources, resourceBaseUrl) { if (!ResourceModel) return null; if (!ResourceModel.isResourceUrl(src)) return null; const resourceId = ResourceModel.urlToId(src); const result = resources[resourceId]; const resource = result ? result.item : null; const resourceStatus = utils.resourceStatus(ResourceModel, result); if (resourceStatus !== 'ready') { const icon = utils.resourceStatusImage(resourceStatus); return `<div class="not-loaded-resource resource-status-${resourceStatus}" data-resource-id="${resourceId}">` + `<img src="data:image/svg+xml;utf8,${htmlentities(icon)}"/>` + '</div>'; } const mime = resource.mime ? resource.mime.toLowerCase() : ''; if (ResourceModel.isSupportedImageMimeType(mime)) { let newSrc = `./${ResourceModel.filename(resource)}`; if (resourceBaseUrl) newSrc = resourceBaseUrl + newSrc; newSrc += `?t=${resource.updated_time}`; return { 'data-resource-id': resource.id, src: newSrc, }; } return null; }; module.exports = utils;
1
14,063
Why use multiple function calls here instead of a single array of supported mimeTypes? In line with your PR, I think we might eventually see inline videos or inline PDFs. It would be great to generalize this a bit (see the sketch after this record).
laurent22-joplin
js
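A rough sketch of the generalization the reviewer suggests, using a hypothetical supportedInlineMimeTypes table instead of one ResourceModel.isSupportedXxxMimeType() call per media kind; the list contents and helper name are assumptions, not Joplin's actual API.

// One table of inline-capable mime types instead of per-kind helper calls.
const supportedInlineMimeTypes = [
    'image/png', 'image/jpeg', 'image/gif', 'image/svg+xml',
    'audio/mpeg', 'audio/ogg', 'audio/wav',
    // could later grow to video/* or application/pdf, as the review notes
];

function isSupportedInlineMimeType(mime) {
    return supportedInlineMimeTypes.includes((mime || '').toLowerCase());
}

// The check in utils.imageReplacement() would then read:
//     if (isSupportedInlineMimeType(mime)) { ... }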
@@ -37,6 +37,7 @@ from pyspark.sql.types import ( LongType, StringType, TimestampType, + NumericType, ) from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Base and utility classes for Koalas objects. """ from abc import ABCMeta, abstractmethod from collections import OrderedDict import datetime from functools import wraps, partial from typing import Union, Callable, Any import warnings import numpy as np import pandas as pd # noqa: F401 from pandas.api.types import is_list_like from pandas.core.accessor import CachedAccessor from pyspark import sql as spark from pyspark.sql import functions as F, Window, Column from pyspark.sql.types import ( DateType, DoubleType, FloatType, LongType, StringType, TimestampType, ) from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas import numpy_compat from databricks.koalas.internal import ( InternalFrame, NATURAL_ORDER_COLUMN_NAME, SPARK_DEFAULT_INDEX_NAME, ) from databricks.koalas.spark.accessors import SparkIndexOpsMethods from databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype from databricks.koalas.utils import align_diff_series, same_anchor, scol_for, validate_axis from databricks.koalas.frame import DataFrame def booleanize_null(left_scol, scol, f): """ Booleanize Null in Spark Column """ comp_ops = [ getattr(Column, "__{}__".format(comp_op)) for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"] ] if f in comp_ops: # if `f` is "!=", fill null with True otherwise False filler = f == Column.__ne__ scol = F.when(scol.isNull(), filler).otherwise(scol) elif f == Column.__or__: scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol) elif f == Column.__and__: scol = F.when(scol.isNull(), False).otherwise(scol) return scol def column_op(f): """ A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be supported too. If this decorator is used for the `f` function that takes Spark Column and returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas Series. :param f: a function that takes Spark Column and returns Spark Column. :param self: Koalas Series :param args: arguments that the function `f` takes. """ @wraps(f) def wrapper(self, *args): # It is possible for the function `f` takes other arguments than Spark Column. # To cover this case, explicitly check if the argument is Koalas Series and # extract Spark Column. For other arguments, they are used as are. 
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)] if all(same_anchor(self, col) for col in cols): # Same DataFrame anchors args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args] scol = f(self.spark.column, *args) scol = booleanize_null(self.spark.column, scol, f) kser = self._with_new_scol(scol) else: # Different DataFrame anchors def apply_func(this_column, *that_columns): scol = f(this_column, *that_columns) return booleanize_null(this_column, scol, f) kser = align_diff_series(apply_func, self, *args, how="full") if not all(self.name == col.name for col in cols): kser = kser.rename() return kser return wrapper def numpy_column_op(f): @wraps(f) def wrapper(self, *args): # PySpark does not support NumPy type out of the box. For now, we convert NumPy types # into some primitive types understandable in PySpark. new_args = [] for arg in args: # TODO: This is a quick hack to support NumPy type. We should revisit this. if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64): new_args.append(float(arg / np.timedelta64(1, "s"))) else: new_args.append(arg) return column_op(f)(self, *new_args) return wrapper class IndexOpsMixin(object, metaclass=ABCMeta): """common ops mixin to support a unified interface / docs for Series / Index Assuming there are following attributes or properties and function. """ @property @abstractmethod def _internal(self) -> InternalFrame: pass @property @abstractmethod def _kdf(self) -> DataFrame: pass @abstractmethod def _with_new_scol(self, scol: spark.Column): pass spark = CachedAccessor("spark", SparkIndexOpsMethods) @property def spark_column(self): warnings.warn( "Series.spark_column is deprecated as of Series.spark.column. " "Please use the API instead.", FutureWarning, ) return self.spark.column spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__ # arithmetic operators __neg__ = column_op(Column.__neg__) def __add__(self, other): if isinstance(self.spark.data_type, StringType): # Concatenate string columns if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType): return column_op(F.concat)(self, other) # Handle df['col'] + 'literal' elif isinstance(other, str): return column_op(F.concat)(self, F.lit(other)) else: raise TypeError("string addition can only be applied to string series or literals.") else: return column_op(Column.__add__)(self, other) def __sub__(self, other): if isinstance(self.spark.data_type, TimestampType): # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction. msg = ( "Note that there is a behavior difference of timestamp subtraction. " "The timestamp subtraction returns an integer in seconds, " "whereas pandas returns 'timedelta64[ns]'." ) if isinstance(other, IndexOpsMixin) and isinstance( other.spark.data_type, TimestampType ): warnings.warn(msg, UserWarning) return self.astype("bigint") - other.astype("bigint") elif isinstance(other, datetime.datetime): warnings.warn(msg, UserWarning) return self.astype("bigint") - F.lit(other).cast(as_spark_type("bigint")) else: raise TypeError("datetime subtraction can only be applied to datetime series.") elif isinstance(self.spark.data_type, DateType): # Note that date subtraction casts arguments to integer. This is to mimic pandas's # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction. msg = ( "Note that there is a behavior difference of date subtraction. 
" "The date subtraction returns an integer in days, " "whereas pandas returns 'timedelta64[ns]'." ) if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType): warnings.warn(msg, UserWarning) return column_op(F.datediff)(self, other).astype("bigint") elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime): warnings.warn(msg, UserWarning) return column_op(F.datediff)(self, F.lit(other)).astype("bigint") else: raise TypeError("date subtraction can only be applied to date series.") return column_op(Column.__sub__)(self, other) __mul__ = column_op(Column.__mul__) def __truediv__(self, other): """ __truediv__ has different behaviour between pandas and PySpark for several cases. 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf +-------------------------------------------+ | dividend (divisor: 0) | PySpark | pandas | |-----------------------|---------|---------| | np.inf | null | np.inf | | -np.inf | null | -np.inf | | 10 | null | np.inf | | -10 | null | -np.inf | +-----------------------|---------|---------+ """ def truediv(left, right): return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise( F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise( F.lit(np.inf).__div__(left) ) ) return numpy_column_op(truediv)(self, other) def __mod__(self, other): def mod(left, right): return ((left % right) + right) % right return column_op(mod)(self, other) def __radd__(self, other): # Handle 'literal' + df['col'] if isinstance(self.spark.data_type, StringType) and isinstance(other, str): return self._with_new_scol(F.concat(F.lit(other), self.spark.column)) else: return column_op(Column.__radd__)(self, other) def __rsub__(self, other): if isinstance(self.spark.data_type, TimestampType): # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction. msg = ( "Note that there is a behavior difference of timestamp subtraction. " "The timestamp subtraction returns an integer in seconds, " "whereas pandas returns 'timedelta64[ns]'." ) if isinstance(other, datetime.datetime): warnings.warn(msg, UserWarning) return -(self.astype("bigint") - F.lit(other).cast(as_spark_type("bigint"))) else: raise TypeError("datetime subtraction can only be applied to datetime series.") elif isinstance(self.spark.data_type, DateType): # Note that date subtraction casts arguments to integer. This is to mimic pandas's # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction. msg = ( "Note that there is a behavior difference of date subtraction. " "The date subtraction returns an integer in days, " "whereas pandas returns 'timedelta64[ns]'." 
) if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime): warnings.warn(msg, UserWarning) return -column_op(F.datediff)(self, F.lit(other)).astype("bigint") else: raise TypeError("date subtraction can only be applied to date series.") return column_op(Column.__rsub__)(self, other) __rmul__ = column_op(Column.__rmul__) def __rtruediv__(self, other): def rtruediv(left, right): return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise( F.lit(right).__truediv__(left) ) return numpy_column_op(rtruediv)(self, other) def __floordiv__(self, other): """ __floordiv__ has different behaviour between pandas and PySpark for several cases. 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf +-------------------------------------------+ | dividend (divisor: 0) | PySpark | pandas | |-----------------------|---------|---------| | np.inf | null | np.inf | | -np.inf | null | -np.inf | | 10 | null | np.inf | | -10 | null | -np.inf | +-----------------------|---------|---------+ """ def floordiv(left, right): return F.when(F.lit(right is np.nan), np.nan).otherwise( F.when( F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right)) ).otherwise( F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise( F.lit(np.inf).__div__(left) ) ) ) return numpy_column_op(floordiv)(self, other) def __rfloordiv__(self, other): def rfloordiv(left, right): return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise( F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left))) ) return numpy_column_op(rfloordiv)(self, other) def __rmod__(self, other): def rmod(left, right): return ((right % left) + left) % left return column_op(rmod)(self, other) __pow__ = column_op(Column.__pow__) __rpow__ = column_op(Column.__rpow__) __abs__ = column_op(F.abs) # comparison operators __eq__ = column_op(Column.__eq__) __ne__ = column_op(Column.__ne__) __lt__ = column_op(Column.__lt__) __le__ = column_op(Column.__le__) __ge__ = column_op(Column.__ge__) __gt__ = column_op(Column.__gt__) # `and`, `or`, `not` cannot be overloaded in Python, # so use bitwise operators as boolean operators __and__ = column_op(Column.__and__) __or__ = column_op(Column.__or__) __invert__ = column_op(Column.__invert__) __rand__ = column_op(Column.__rand__) __ror__ = column_op(Column.__ror__) # NDArray Compat def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any): # Try dunder methods first. result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) # After that, we try with PySpark APIs. if result is NotImplemented: result = numpy_compat.maybe_dispatch_ufunc_to_spark_func( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result else: # TODO: support more APIs? raise NotImplementedError("Koalas objects currently do not support %s." % ufunc) @property def dtype(self): """Return the dtype object of the underlying data. 
Examples -------- >>> s = ks.Series([1, 2, 3]) >>> s.dtype dtype('int64') >>> s = ks.Series(list('abc')) >>> s.dtype dtype('O') >>> s = ks.Series(pd.date_range('20130101', periods=3)) >>> s.dtype dtype('<M8[ns]') >>> s.rename("a").to_frame().set_index("a").index.dtype dtype('<M8[ns]') """ return spark_type_to_pandas_dtype(self.spark.data_type) @property def empty(self): """ Returns true if the current object is empty. Otherwise, returns false. >>> ks.range(10).id.empty False >>> ks.range(0).id.empty True >>> ks.DataFrame({}, index=list('abc')).index.empty False """ return self._internal.resolved_copy.spark_frame.rdd.isEmpty() @property def hasnans(self): """ Return True if it has any missing values. Otherwise, it returns False. >>> ks.DataFrame({}, index=list('abc')).index.hasnans False >>> ks.Series(['a', None]).hasnans True >>> ks.Series([1.0, 2.0, np.nan]).hasnans True >>> ks.Series([1, 2, 3]).hasnans False >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans True >>> ks.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans False """ sdf = self._internal.spark_frame scol = self.spark.column if isinstance(self.spark.data_type, (DoubleType, FloatType)): return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0] else: return sdf.select(F.max(scol.isNull())).collect()[0][0] @property def is_monotonic(self): """ Return boolean if values in the object are monotonically increasing. .. note:: the current implementation of is_monotonic requires to shuffle and aggregate multiple times to check the order locally and globally, which is potentially expensive. In case of multi-index, all data are transferred to single node which can easily cause out-of-memory error currently. Returns ------- is_monotonic : boolean Examples -------- >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018']) >>> ser.is_monotonic True >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']}) >>> df.dates.is_monotonic False >>> df.index.is_monotonic True >>> ser = ks.Series([1]) >>> ser.is_monotonic True >>> ser = ks.Series([]) >>> ser.is_monotonic True >>> ser.rename("a").to_frame().set_index("a").index.is_monotonic True >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5]) >>> ser.is_monotonic False >>> ser.index.is_monotonic True Support for MultiIndex >>> midx = ks.MultiIndex.from_tuples( ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')]) >>> midx # doctest: +SKIP MultiIndex([('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')], ) >>> midx.is_monotonic True >>> midx = ks.MultiIndex.from_tuples( ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')]) >>> midx # doctest: +SKIP MultiIndex([('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')], ) >>> midx.is_monotonic False """ return self._is_monotonic("increasing") is_monotonic_increasing = is_monotonic @property def is_monotonic_decreasing(self): """ Return boolean if values in the object are monotonically decreasing. .. note:: the current implementation of is_monotonic_decreasing requires to shuffle and aggregate multiple times to check the order locally and globally, which is potentially expensive. In case of multi-index, all data are transferred to single node which can easily cause out-of-memory error currently. 
Returns ------- is_monotonic : boolean Examples -------- >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018']) >>> ser.is_monotonic_decreasing True >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']}) >>> df.dates.is_monotonic_decreasing False >>> df.index.is_monotonic_decreasing False >>> ser = ks.Series([1]) >>> ser.is_monotonic_decreasing True >>> ser = ks.Series([]) >>> ser.is_monotonic_decreasing True >>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing True >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5]) >>> ser.is_monotonic_decreasing True >>> ser.index.is_monotonic_decreasing False Support for MultiIndex >>> midx = ks.MultiIndex.from_tuples( ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')]) >>> midx # doctest: +SKIP MultiIndex([('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')], ) >>> midx.is_monotonic_decreasing False >>> midx = ks.MultiIndex.from_tuples( ... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')]) >>> midx # doctest: +SKIP MultiIndex([('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')], ) >>> midx.is_monotonic_decreasing True """ return self._is_monotonic("decreasing") def _is_locally_monotonic_spark_column(self, order): window = ( Window.partitionBy(F.col("__partition_id")) .orderBy(NATURAL_ORDER_COLUMN_NAME) .rowsBetween(-1, -1) ) if order == "increasing": return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col( "__origin" ).isNotNull() else: return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col( "__origin" ).isNotNull() def _is_monotonic(self, order): assert order in ("increasing", "decreasing") sdf = self._internal.spark_frame sdf = ( sdf.select( F.spark_partition_id().alias( "__partition_id" ), # Make sure we use the same partition id in the whole job. F.col(NATURAL_ORDER_COLUMN_NAME), self.spark.column.alias("__origin"), ) .select( F.col("__partition_id"), F.col("__origin"), self._is_locally_monotonic_spark_column(order).alias( "__comparison_within_partition" ), ) .groupby(F.col("__partition_id")) .agg( F.min(F.col("__origin")).alias("__partition_min"), F.max(F.col("__origin")).alias("__partition_max"), F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True))).alias( "__comparison_within_partition" ), ) ) # Now we're windowing the aggregation results without partition specification. # The number of rows here will be as the same of partitions, which is expected # to be small. window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1) if order == "increasing": comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over( window ) else: comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over( window ) sdf = sdf.select( comparison_col.alias("__comparison_between_partitions"), F.col("__comparison_within_partition"), ) ret = sdf.select( F.min(F.coalesce(F.col("__comparison_between_partitions"), F.lit(True))) & F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True))) ).collect()[0][0] if ret is None: return True else: return ret @property def ndim(self): """ Return an int representing the number of array dimensions. Return 1 for Series / Index / MultiIndex. Examples -------- For Series >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8]) >>> s.ndim 1 For Index >>> s.index.ndim 1 For MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... 
[1, 1, 1, 1, 1, 2, 1, 2, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) >>> s.index.ndim 1 """ return 1 def astype(self, dtype): """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> ser = ks.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 >>> ser.rename("a").to_frame().set_index("a").index.astype('int64') Int64Index([1, 2], dtype='int64', name='a') """ spark_type = as_spark_type(dtype) if not spark_type: raise ValueError("Type {} not understood".format(dtype)) return self._with_new_scol(self.spark.column.cast(spark_type)) def isin(self, values): """ Check whether `values` are contained in Series. Return a boolean Series showing whether each element in the Series matches an element in the passed sequence of `values` exactly. Parameters ---------- values : list or set The sequence of values to test. Returns ------- isin : Series (bool dtype) Examples -------- >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo'], name='animal') >>> s.isin(['cow', 'lama']) 0 True 1 True 2 True 3 False 4 True 5 False Name: animal, dtype: bool Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) 0 True 1 False 2 True 3 False 4 True 5 False Name: animal, dtype: bool >>> s.rename("a").to_frame().set_index("a").index.isin(['lama']) Index([True, False, True, False, True, False], dtype='object', name='a') """ if not is_list_like(values): raise TypeError( "only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__) ) return self._with_new_scol(self.spark.column.isin(list(values))) def isnull(self): """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings '' or numpy.inf are not considered NA values (unless you set pandas.options.mode.use_inf_as_na = True). Returns ------- Series : Mask of bool values for each element in Series that indicates whether an element is not an NA value. Examples -------- >>> ser = ks.Series([5, 6, np.NaN]) >>> ser.isna() # doctest: +NORMALIZE_WHITESPACE 0 False 1 False 2 True dtype: bool >>> ser.rename("a").to_frame().set_index("a").index.isna() Index([False, False, True], dtype='object', name='a') """ from databricks.koalas.indexes import MultiIndex if isinstance(self, MultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") if isinstance(self.spark.data_type, (FloatType, DoubleType)): return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column)) else: return self._with_new_scol(self.spark.column.isNull()) isna = isnull def notnull(self): """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings '' or numpy.inf are not considered NA values (unless you set pandas.options.mode.use_inf_as_na = True). NA values, such as None or numpy.NaN, get mapped to False values. 
Returns ------- Series : Mask of bool values for each element in Series that indicates whether an element is not an NA value. Examples -------- Show which entries in a Series are not NA. >>> ser = ks.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool >>> ser.rename("a").to_frame().set_index("a").index.notna() Index([True, True, False], dtype='object', name='a') """ from databricks.koalas.indexes import MultiIndex if isinstance(self, MultiIndex): raise NotImplementedError("notna is not defined for MultiIndex") return (~self.isnull()).rename(self.name) notna = notnull # TODO: axis, skipna, and many arguments should be implemented. def all(self, axis: Union[int, str] = 0) -> bool: """ Return whether all elements are True. Returns True unless there at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- >>> ks.Series([True, True]).all() True >>> ks.Series([True, False]).all() False >>> ks.Series([0, 1]).all() False >>> ks.Series([1, 2, 3]).all() True >>> ks.Series([True, True, None]).all() True >>> ks.Series([True, False, None]).all() False >>> ks.Series([]).all() True >>> ks.Series([np.nan]).all() True >>> df = ks.Series([True, False, None]).rename("a").to_frame() >>> df.set_index("a").index.all() False """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select(self.spark.column) col = scol_for(sdf, sdf.columns[0]) # Note that we're ignoring `None`s here for now. # any and every was added as of Spark 3.0 # ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0] # Here we use min as its alternative: ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), F.lit(True)))).collect()[0][0] if ret is None: return True else: return ret # TODO: axis, skipna, and many arguments should be implemented. def any(self, axis: Union[int, str] = 0) -> bool: """ Return whether any element is True. Returns False unless there at least one element within a series that is True or equivalent (e.g. non-zero or non-empty). Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- >>> ks.Series([False, False]).any() False >>> ks.Series([True, False]).any() True >>> ks.Series([0, 0]).any() False >>> ks.Series([0, 1, 2]).any() True >>> ks.Series([False, False, None]).any() False >>> ks.Series([True, False, None]).any() True >>> ks.Series([]).any() False >>> ks.Series([np.nan]).any() False >>> df = ks.Series([True, False, None]).rename("a").to_frame() >>> df.set_index("a").index.any() True """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select(self.spark.column) col = scol_for(sdf, sdf.columns[0]) # Note that we're ignoring `None`s here for now. 
# any and every was added as of Spark 3.0 # ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0] # Here we use max as its alternative: ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), F.lit(False)))).collect()[0][0] if ret is None: return False else: return ret # TODO: add frep and axis parameter def shift(self, periods=1, fill_value=None): """ Shift Series/Index by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input Series/Index, shifted. Examples -------- >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.Col1.shift(periods=3) 0 NaN 1 NaN 2 NaN 3 10.0 4 20.0 Name: Col1, dtype: float64 >>> df.Col2.shift(periods=3, fill_value=0) 0 0 1 0 2 0 3 13 4 23 Name: Col2, dtype: int64 >>> df.index.shift(periods=3, fill_value=0) Int64Index([0, 0, 0, 0, 1], dtype='int64') """ return self._shift(periods, fill_value) def _shift(self, periods, fill_value, part_cols=()): if not isinstance(periods, int): raise ValueError("periods should be an int; however, got [%s]" % type(periods)) col = self.spark.column window = ( Window.partitionBy(*part_cols) .orderBy(NATURAL_ORDER_COLUMN_NAME) .rowsBetween(-periods, -periods) ) lag_col = F.lag(col, periods).over(window) col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col) return self._with_new_scol(col) # TODO: Update Documentation for Bins Parameter when its supported def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): """ Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : boolean, default False If True then the object returned will contain the relative frequencies of the unique values. sort : boolean, default True Sort by values. ascending : boolean, default False Sort in ascending order. bins : Not Yet Supported dropna : boolean, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.count: Number of non-NA elements in a Series. Examples -------- For Series >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]}) >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE 1.0 3 0.0 2 Name: x, dtype: int64 With `normalize` set to `True`, returns the relative frequency by dividing all values by the sum of values. >>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE 1.0 0.6 0.0 0.4 Name: x, dtype: float64 **dropna** With `dropna` set to `False` we can also see NaN index values. 
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE 1.0 3 0.0 2 NaN 1 Name: x, dtype: int64 For Index >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan]) >>> idx Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64') >>> idx.value_counts().sort_index() 1.0 1 2.0 1 3.0 2 4.0 1 dtype: int64 **sort** With `sort` set to `False`, the result wouldn't be sorted by number of count. >>> idx.value_counts(sort=True).sort_index() 1.0 1 2.0 1 3.0 2 4.0 1 dtype: int64 **normalize** With `normalize` set to `True`, returns the relative frequency by dividing all values by the sum of values. >>> idx.value_counts(normalize=True).sort_index() 1.0 0.2 2.0 0.2 3.0 0.4 4.0 0.2 dtype: float64 **dropna** With `dropna` set to `False` we can also see NaN index values. >>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP 1.0 1 2.0 1 3.0 2 4.0 1 NaN 1 dtype: int64 For MultiIndex. >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [1, 1, 1, 1, 1, 2, 1, 2, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) >>> s.index # doctest: +SKIP MultiIndex([( 'lama', 'weight'), ( 'lama', 'weight'), ( 'lama', 'weight'), ( 'cow', 'weight'), ( 'cow', 'weight'), ( 'cow', 'length'), ('falcon', 'weight'), ('falcon', 'length'), ('falcon', 'length')], ) >>> s.index.value_counts().sort_index() (cow, length) 1 (cow, weight) 2 (falcon, length) 2 (falcon, weight) 1 (lama, weight) 3 dtype: int64 >>> s.index.value_counts(normalize=True).sort_index() (cow, length) 0.111111 (cow, weight) 0.222222 (falcon, length) 0.222222 (falcon, weight) 0.111111 (lama, weight) 0.333333 dtype: float64 If Index has name, keep the name up. >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas') >>> idx.value_counts().sort_index() 0 3 1 2 2 1 3 1 Name: koalas, dtype: int64 """ from databricks.koalas.series import first_series if bins is not None: raise NotImplementedError("value_counts currently does not support bins") if dropna: sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna() else: sdf_dropna = self._internal.spark_frame.select(self.spark.column) index_name = SPARK_DEFAULT_INDEX_NAME column_name = self._internal.data_spark_column_names[0] sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count() if sort: if ascending: sdf = sdf.orderBy(F.col("count")) else: sdf = sdf.orderBy(F.col("count").desc()) if normalize: sum = sdf_dropna.count() sdf = sdf.withColumn("count", F.col("count") / F.lit(sum)) internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict({index_name: None}), column_labels=self._internal.column_labels, data_spark_columns=[scol_for(sdf, "count")], column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal)) def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int: """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to Koalas and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to Koalas. 
Returns ------- int See Also -------- DataFrame.nunique: Method nunique for DataFrame. Series.count: Count non-NA/null observations in the Series. Examples -------- >>> ks.Series([1, 2, 3, np.nan]).nunique() 3 >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False) 4 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True) 3 >>> idx = ks.Index([1, 1, 2, None]) >>> idx Float64Index([1.0, 1.0, 2.0, nan], dtype='float64') >>> idx.nunique() 2 >>> idx.nunique(dropna=False) 3 """ res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)]) return res.collect()[0][0] def _nunique(self, dropna=True, approx=False, rsd=0.05): colname = self._internal.data_spark_column_names[0] count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct if dropna: return count_fn(self.spark.column).alias(colname) else: return ( count_fn(self.spark.column) + F.when( F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1 ).otherwise(0) ).alias(colname) def take(self, indices): """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- Series >>> kser = ks.Series([100, 200, 300, 400, 500]) >>> kser 0 100 1 200 2 300 3 400 4 500 dtype: int64 >>> kser.take([0, 2, 4]).sort_index() 0 100 2 300 4 500 dtype: int64 Index >>> kidx = ks.Index([100, 200, 300, 400, 500]) >>> kidx Int64Index([100, 200, 300, 400, 500], dtype='int64') >>> kidx.take([0, 2, 4]).sort_values() Int64Index([100, 300, 500], dtype='int64') MultiIndex >>> kmidx = ks.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")]) >>> kmidx # doctest: +SKIP MultiIndex([('x', 'a'), ('x', 'b'), ('x', 'c')], ) >>> kmidx.take([0, 2]) # doctest: +SKIP MultiIndex([('x', 'a'), ('x', 'c')], ) """ if not is_list_like(indices) or isinstance(indices, (dict, set)): raise ValueError("`indices` must be a list-like except dict or set") if isinstance(self, ks.Series): result = self.iloc[indices] elif isinstance(self, ks.Index): result = self._kdf.iloc[indices].index return result
1
16,515
nit: shall we keep alphabetical order?
databricks-koalas
py
@@ -106,6 +106,15 @@ export default class Realm { let method = util.createMethod(objectTypes.REALM, 'objects'); return method.apply(this, [type, ...args]); } + + objectForPrimaryKey(type, ...args) { + if (typeof type == 'function') { + type = objects.typeForConstructor(this[keys.realm], type); + } + + let method = util.createMethod(objectTypes.REALM, 'objectForPrimaryKey'); + return method.apply(this, [type, ...args]); + } } // Non-mutating methods:
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; import { NativeModules } from 'react-native'; import { keys, objectTypes } from './constants'; import Collection, * as collections from './collections'; import List, { createList } from './lists'; import Results, { createResults } from './results'; import RealmObject, * as objects from './objects'; import * as rpc from './rpc'; import * as util from './util'; const {debugHosts, debugPort} = NativeModules.Realm; rpc.registerTypeConverter(objectTypes.LIST, createList); rpc.registerTypeConverter(objectTypes.RESULTS, createResults); rpc.registerTypeConverter(objectTypes.OBJECT, objects.createObject); rpc.registerTypeConverter(objectTypes.REALM, createRealm); function createRealm(_, info) { let realm = Object.create(Realm.prototype); setupRealm(realm, info.id); return realm; } function setupRealm(realm, realmId) { realm[keys.id] = realmId; realm[keys.realm] = realmId; realm[keys.type] = objectTypes.REALM; [ 'path', 'readOnly', 'schema', 'schemaVersion', ].forEach((name) => { Object.defineProperty(realm, name, {get: util.getterForProperty(name)}); }); } export default class Realm { constructor(config) { let schemas = typeof config == 'object' && config.schema; let constructors = schemas ? {} : null; for (let i = 0, len = schemas ? schemas.length : 0; i < len; i++) { let item = schemas[i]; if (typeof item == 'function') { let schema = item.schema; if (!schema || typeof schema != 'object') { throw new Error("Realm object constructor must have 'schema' property"); } let {name, properties} = schema; if (!name || typeof name != 'string') { throw new Error("Realm object schema must have 'name' property"); } else if (!properties || typeof properties != 'object') { throw new Error("Realm object schema must have 'properties' property"); } schemas.splice(i, 1, schema); constructors[name] = item; } } let realmId = rpc.createRealm(Array.from(arguments)); setupRealm(this, realmId); // This will create mappings between the id, path, and potential constructors. 
objects.registerConstructors(realmId, this.path, constructors); } create(type, ...args) { if (typeof type == 'function') { type = objects.typeForConstructor(this[keys.realm], type); } let method = util.createMethod(objectTypes.REALM, 'create', true); return method.apply(this, [type, ...args]); } objects(type, ...args) { if (typeof type == 'function') { type = objects.typeForConstructor(this[keys.realm], type); } let method = util.createMethod(objectTypes.REALM, 'objects'); return method.apply(this, [type, ...args]); } } // Non-mutating methods: util.createMethods(Realm.prototype, objectTypes.REALM, [ 'addListener', 'removeListener', 'removeAllListeners', 'close', ]); // Mutating methods: util.createMethods(Realm.prototype, objectTypes.REALM, [ 'delete', 'deleteAll', 'write', ], true); Object.defineProperties(Realm, { Collection: { value: Collection, }, List: { value: List, }, Results: { value: Results, }, Object: { value: RealmObject, }, defaultPath: { get: util.getterForProperty('defaultPath'), set: util.setterForProperty('defaultPath'), }, schemaVersion: { value: function(path, encryptionKey) { return rpc.callMethod(undefined, Realm[keys.id], 'schemaVersion', Array.from(arguments)); } }, copyBundledRealmFiles: { value: function() { return rpc.callMethod(undefined, Realm[keys.id], 'copyBundledRealmFiles', []); } }, clearTestState: { value: function() { collections.clearMutationListeners(); objects.clearRegisteredConstructors(); rpc.clearTestState(); }, }, }); for (let i = 0, len = debugHosts.length; i < len; i++) { try { // The session ID refers to the Realm constructor object in the RPC server. Realm[keys.id] = rpc.createSession(debugHosts[i] + ':' + debugPort); break; } catch (e) { // Only throw exception after all hosts have been tried. if (i < len - 1) { continue; } // Log the original exception for debugging purposes. console.error(e); throw new Error( 'Realm failed to connect to the embedded debug server inside the app. ' + 'If attempting to use Chrome debugging from a device, ensure the device is ' + 'reachable on the same network as this machine.' ); } }
1
15,475
We could just move the check for a function into `typeForConstructor` to make this a one-liner every time we need to do this.
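(Editor's illustrative sketch, not part of the original review thread: assuming `objects.typeForConstructor` were changed to return `type` unchanged whenever it is already a type-name string, each call site in Realm could drop its `typeof` check and keep only the single conversion line, e.g.)

objectForPrimaryKey(type, ...args) {
    // assumption: typeForConstructor now accepts either a constructor or a type-name string
    type = objects.typeForConstructor(this[keys.realm], type);
    let method = util.createMethod(objectTypes.REALM, 'objectForPrimaryKey');
    return method.apply(this, [type, ...args]);
}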
realm-realm-js
js
@@ -153,7 +153,7 @@ public class I18nTest extends JUnit4TestBase { assertThat(ime.isActivated()).isTrue(); assertThat(ime.getActiveEngine()).isEqualTo(desiredEngine); - // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. + // Send the Romaji for "Tokyo". The space at the end instructs the IME to transform the word. input.sendKeys("toukyou "); input.sendKeys(Keys.ENTER);
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; import static org.openqa.selenium.testing.drivers.Browser.CHROME; import static org.openqa.selenium.testing.drivers.Browser.FIREFOX; import static org.openqa.selenium.testing.drivers.Browser.HTMLUNIT; import static org.openqa.selenium.testing.drivers.Browser.IE; import static org.openqa.selenium.testing.drivers.Browser.MARIONETTE; import org.junit.Test; import org.openqa.selenium.environment.GlobalTestEnvironment; import org.openqa.selenium.testing.Ignore; import org.openqa.selenium.testing.JUnit4TestBase; import org.openqa.selenium.testing.NotYetImplemented; import org.openqa.selenium.testing.TestUtilities; import java.util.List; public class I18nTest extends JUnit4TestBase { /** * The Hebrew word shalom (peace) encoded in order Shin (sh) Lamed (L) Vav (O) final-Mem (M). */ private static final String shalom = "\u05E9\u05DC\u05D5\u05DD"; /** * The Hebrew word tmunot (images) encoded in order Taf (t) Mem (m) Vav (u) Nun (n) Vav (o) Taf * (t). 
*/ private static final String tmunot = "\u05EA\u05DE\u05D5\u05E0\u05D5\u05EA"; /** * Japanese for "Tokyo" */ private static final String tokyo = "\u6771\u4EAC"; /** * Chinese for "The Voice of China" */ private static final String theVoiceOfChina = "\u4E2D\u56FD\u4E4B\u58F0"; @Test public void testCn() { driver.get(pages.chinesePage); driver.findElement(By.linkText(theVoiceOfChina)).click(); } @Test public void testEnteringHebrewTextFromLeftToRight() { driver.get(pages.chinesePage); WebElement input = driver.findElement(By.name("i18n")); input.sendKeys(shalom); assertThat(input.getAttribute("value")).isEqualTo(shalom); } @Test public void testEnteringHebrewTextFromRightToLeft() { driver.get(pages.chinesePage); WebElement input = driver.findElement(By.name("i18n")); input.sendKeys(tmunot); assertThat(input.getAttribute("value")).isEqualTo(tmunot); } @Test @Ignore(value = CHROME, reason = "ChromeDriver only supports characters in the BMP") public void testEnteringSupplementaryCharacters() { assumeFalse("IE: versions less thank 10 have issue 5069", TestUtilities.isInternetExplorer(driver) && TestUtilities.getIEVersion(driver) < 10); driver.get(pages.chinesePage); String input = ""; input += new String(Character.toChars(0x20000)); input += new String(Character.toChars(0x2070E)); input += new String(Character.toChars(0x2000B)); input += new String(Character.toChars(0x2A190)); input += new String(Character.toChars(0x2A6B2)); WebElement el = driver.findElement(By.name("i18n")); el.sendKeys(input); assertThat(el.getAttribute("value")).isEqualTo(input); } @Test public void testShouldBeAbleToReturnTheTextInAPage() { String url = GlobalTestEnvironment.get() .getAppServer() .whereIs("encoding"); driver.get(url); String text = driver.findElement(By.tagName("body")).getText(); assertThat(text).isEqualTo(shalom); } @Test @Ignore(IE) @Ignore(CHROME) @Ignore(FIREFOX) @Ignore(MARIONETTE) @NotYetImplemented(HTMLUNIT) public void testShouldBeAbleToActivateIMEEngine() throws InterruptedException { assumeTrue("IME is supported on Linux only.", TestUtilities.getEffectivePlatform().is(Platform.LINUX)); driver.get(pages.formPage); WebElement input = driver.findElement(By.id("working")); // Activate IME. By default, this keycode activates IBus input for Japanese. WebDriver.ImeHandler ime = driver.manage().ime(); List<String> engines = ime.getAvailableEngines(); String desiredEngine = "anthy"; if (!engines.contains(desiredEngine)) { System.out.println("Desired engine " + desiredEngine + " not available, skipping test."); return; } ime.activateEngine(desiredEngine); int totalWaits = 0; while (!ime.isActivated() && (totalWaits < 10)) { Thread.sleep(500); totalWaits++; } assertThat(ime.isActivated()).isTrue(); assertThat(ime.getActiveEngine()).isEqualTo(desiredEngine); // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. input.sendKeys("toukyou "); input.sendKeys(Keys.ENTER); String elementValue = input.getAttribute("value"); ime.deactivate(); assertThat(ime.isActivated()).isFalse(); // IME is not present. Don't fail because of that. But it should have the Romaji value // instead. 
assertThat(elementValue) .describedAs("The elemnt's value should either remain in Romaji or be converted properly.") .isEqualTo(tokyo); } @Test @Ignore(IE) @Ignore(CHROME) @Ignore(FIREFOX) public void testShouldBeAbleToInputJapanese() { assumeTrue("IME is supported on Linux only.", TestUtilities.getEffectivePlatform().is(Platform.LINUX)); driver.get(pages.formPage); WebElement input = driver.findElement(By.id("working")); // Activate IME. By default, this keycode activates IBus input for Japanese. input.sendKeys(Keys.ZENKAKU_HANKAKU); // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. input.sendKeys("toukyou "); String elementValue = input.getAttribute("value"); // Turn OFF IME input first. input.sendKeys(Keys.ZENKAKU_HANKAKU); // IME is not present. Don't fail because of that. But it should have the Romaji value // instead. assertThat(elementValue) .describedAs("The element's value should either remain in Romaji or be converted properly.") .isIn(tokyo, "\uE040" + "toukyou ", "toukyou "); } }
1
16,514
A correct but unrelated change. We'll slide this one in without another word ;)
SeleniumHQ-selenium
py
@@ -40,6 +40,9 @@ Solver<Dtype>::Solver(const SolverParameter& param) template <typename Dtype> void Solver<Dtype>::Solve(const char* resume_file) { Caffe::set_mode(Caffe::Brew(param_.solver_mode())); + if (param_.solver_mode()) { + Caffe::SetDevice(param_.device_id()); + } Caffe::set_phase(Caffe::TRAIN); LOG(INFO) << "Solving " << net_->name(); PreSolve();
1
// Copyright Yangqing Jia 2013 #include <cstdio> #include <algorithm> #include <string> #include <vector> #include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> Solver<Dtype>::Solver(const SolverParameter& param) : param_(param), net_(), test_net_() { // Scaffolding code NetParameter train_net_param; ReadProtoFromTextFile(param_.train_net(), &train_net_param); LOG(INFO) << "Creating training net."; net_.reset(new Net<Dtype>(train_net_param)); if (param_.has_test_net()) { LOG(INFO) << "Creating testing net."; NetParameter test_net_param; ReadProtoFromTextFile(param_.test_net(), &test_net_param); test_net_.reset(new Net<Dtype>(test_net_param)); CHECK_GT(param_.test_iter(), 0); CHECK_GT(param_.test_interval(), 0); } LOG(INFO) << "Solver scaffolding done."; } template <typename Dtype> void Solver<Dtype>::Solve(const char* resume_file) { Caffe::set_mode(Caffe::Brew(param_.solver_mode())); Caffe::set_phase(Caffe::TRAIN); LOG(INFO) << "Solving " << net_->name(); PreSolve(); iter_ = 0; if (resume_file) { LOG(INFO) << "Restoring previous solver status from " << resume_file; Restore(resume_file); } // For a network that is trained by the solver, no bottom or top vecs // should be given, and we will just provide dummy vecs. vector<Blob<Dtype>*> bottom_vec; while (iter_++ < param_.max_iter()) { Dtype loss = net_->ForwardBackward(bottom_vec); ComputeUpdateValue(); net_->Update(); if (param_.display() && iter_ % param_.display() == 0) { LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; } if (param_.test_interval() && iter_ % param_.test_interval() == 0) { // We need to set phase to test before running. Caffe::set_phase(Caffe::TEST); Test(); Caffe::set_phase(Caffe::TRAIN); } // Check if we need to do snapshot if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } } // After the optimization is done, always do a snapshot. iter_--; Snapshot(); LOG(INFO) << "Optimization Done."; } template <typename Dtype> void Solver<Dtype>::Test() { LOG(INFO) << "Testing net"; NetParameter net_param; net_->ToProto(&net_param); CHECK_NOTNULL(test_net_.get())->CopyTrainedLayersFrom(net_param); vector<Dtype> test_score; vector<Blob<Dtype>*> bottom_vec; for (int i = 0; i < param_.test_iter(); ++i) { const vector<Blob<Dtype>*>& result = test_net_->Forward(bottom_vec); if (i == 0) { for (int j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); for (int k = 0; k < result[j]->count(); ++k) { test_score.push_back(result_vec[k]); } } } else { int idx = 0; for (int j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); for (int k = 0; k < result[j]->count(); ++k) { test_score[idx++] += result_vec[k]; } } } } for (int i = 0; i < test_score.size(); ++i) { LOG(INFO) << "Test score #" << i << ": " << test_score[i] / param_.test_iter(); } } template <typename Dtype> void Solver<Dtype>::Snapshot() { NetParameter net_param; // For intermediate results, we will also dump the gradient values. 
net_->ToProto(&net_param, param_.snapshot_diff()); string filename(param_.snapshot_prefix()); char iter_str_buffer[20]; sprintf(iter_str_buffer, "_iter_%d", iter_); filename += iter_str_buffer; LOG(INFO) << "Snapshotting to " << filename; WriteProtoToBinaryFile(net_param, filename.c_str()); SolverState state; SnapshotSolverState(&state); state.set_iter(iter_); state.set_learned_net(filename); filename += ".solverstate"; LOG(INFO) << "Snapshotting solver state to " << filename; WriteProtoToBinaryFile(state, filename.c_str()); } template <typename Dtype> void Solver<Dtype>::Restore(const char* state_file) { SolverState state; NetParameter net_param; ReadProtoFromBinaryFile(state_file, &state); if (state.has_learned_net()) { ReadProtoFromBinaryFile(state.learned_net().c_str(), &net_param); net_->CopyTrainedLayersFrom(net_param); } iter_ = state.iter(); RestoreSolverState(state); } // Return the current learning rate. The currently implemented learning rate // policies are as follows: // - fixed: always return base_lr. // - step: return base_lr * gamma ^ (floor(iter / step)) // - exp: return base_lr * gamma ^ iter // - inv: return base_lr * (1 + gamma * iter) ^ (- power) // where base_lr, gamma, step and power are defined in the solver parameter // protocol buffer, and iter is the current iteration. template <typename Dtype> Dtype SGDSolver<Dtype>::GetLearningRate() { Dtype rate; const string& lr_policy = this->param_.lr_policy(); if (lr_policy == "fixed") { rate = this->param_.base_lr(); } else if (lr_policy == "step") { int current_step = this->iter_ / this->param_.stepsize(); rate = this->param_.base_lr() * pow(this->param_.gamma(), current_step); } else if (lr_policy == "exp") { rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); } else if (lr_policy == "inv") { rate = this->param_.base_lr() * pow(Dtype(1) + this->param_.gamma() * this->iter_, - this->param_.power()); } else { LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; } return rate; } template <typename Dtype> void SGDSolver<Dtype>::PreSolve() { // Initialize the history vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params(); history_.clear(); for (int i = 0; i < net_params.size(); ++i) { const Blob<Dtype>* net_param = net_params[i].get(); history_.push_back(shared_ptr<Blob<Dtype> >(new Blob<Dtype>( net_param->num(), net_param->channels(), net_param->height(), net_param->width()))); } } template <typename Dtype> void SGDSolver<Dtype>::ComputeUpdateValue() { vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params(); vector<float>& net_params_lr = this->net_->params_lr(); vector<float>& net_params_weight_decay = this->net_->params_weight_decay(); // get the learning rate Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } Dtype momentum = this->param_.momentum(); Dtype weight_decay = this->param_.weight_decay(); switch (Caffe::mode()) { case Caffe::CPU: for (int param_id = 0; param_id < net_params.size(); ++param_id) { // Compute the value to history, and then copy them to the blob's diff. 
Dtype local_rate = rate * net_params_lr[param_id]; Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; caffe_axpby(net_params[param_id]->count(), local_rate, net_params[param_id]->cpu_diff(), momentum, history_[param_id]->mutable_cpu_data()); if (local_decay) { // add weight decay caffe_axpy(net_params[param_id]->count(), local_decay * local_rate, net_params[param_id]->cpu_data(), history_[param_id]->mutable_cpu_data()); } // copy caffe_copy(net_params[param_id]->count(), history_[param_id]->cpu_data(), net_params[param_id]->mutable_cpu_diff()); } break; case Caffe::GPU: for (int param_id = 0; param_id < net_params.size(); ++param_id) { // Compute the value to history, and then copy them to the blob's diff. Dtype local_rate = rate * net_params_lr[param_id]; Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; caffe_gpu_axpby(net_params[param_id]->count(), local_rate, net_params[param_id]->gpu_diff(), momentum, history_[param_id]->mutable_gpu_data()); if (local_decay) { // add weight decay caffe_gpu_axpy(net_params[param_id]->count(), local_decay * local_rate, net_params[param_id]->gpu_data(), history_[param_id]->mutable_gpu_data()); } // copy caffe_gpu_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); } break; default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template <typename Dtype> void SGDSolver<Dtype>::SnapshotSolverState(SolverState* state) { state->clear_history(); for (int i = 0; i < history_.size(); ++i) { // Add history BlobProto* history_blob = state->add_history(); history_[i]->ToProto(history_blob); } } template <typename Dtype> void SGDSolver<Dtype>::RestoreSolverState(const SolverState& state) { CHECK_EQ(state.history_size(), history_.size()) << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; for (int i = 0; i < history_.size(); ++i) { history_[i]->FromProto(state.history(i)); } } INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); } // namespace caffe
1
26,999
I feel that a slightly safer way is to do if (param_.has_device_id()) { Caffe::SetDevice(param_.device_id()); } just in case a user has hard-coded a device id outside the solver and does not specify the device id in the solver param. Currently, if nothing is set, the solver will always use the 0th device, which might not be desired.
BVLC-caffe
cpp
@@ -321,6 +321,7 @@ class TestSeleniumScriptBuilder(SeleniumTestCase): "closeWindow('that_window')", "submitByName(\"toPort\")", "scriptEval(\"alert('This is Sparta');\")", + "rawCode(multi(1)\n line(2)\n block(33))", {"dragByID(address)": "elementByName(toPort)"}, "switchFrameByName('my_frame')", "switchFrameByIdx(1)",
1
import json import os import tempfile import time from bzt import TaurusConfigError from bzt.engine import ScenarioExecutor from bzt.modules.functional import FuncSamplesReader, LoadSamplesReader, FunctionalAggregator from bzt.modules.python import ApiritifNoseExecutor, PyTestExecutor, RobotExecutor from bzt.modules.python.executors import ApiritifLoadReader, ApiritifFuncReader from tests import RESOURCES_DIR, ExecutorTestCase from tests.modules.selenium import SeleniumTestCase class TestSeleniumNoseRunner(SeleniumTestCase): def test_selenium_prepare_python_single(self): """ Check if script exists in working dir :return: """ self.obj.execution.merge({"scenario": { "script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py" }}) self.obj.prepare() def test_selenium_prepare_python_folder(self): """ Check if scripts exist in working dir :return: """ self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}}) self.obj.prepare() def test_selenium_startup_shutdown_python_single(self): """ run tests from .py file :return: """ self.configure({ 'execution': { "iterations": 1, 'scenario': {'script': RESOURCES_DIR + 'selenium/python/'}, 'executor': 'selenium' }, 'reporting': [{'module': 'junit-xml'}] }) self.obj.execution.merge({"scenario": { "script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py" }}) self.obj.prepare() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) self.obj.shutdown() self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))) def test_selenium_startup_shutdown_python_folder(self): """ run tests from .py files :return: """ self.configure({ 'execution': { 'iterations': 1, 'scenario': {'script': RESOURCES_DIR + 'selenium/python/'}, 'executor': 'selenium' }, 'reporting': [{'module': 'junit-xml'}] }) self.obj.prepare() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) self.obj.shutdown() self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))) def test_runner_fail_no_test_found(self): """ Check that Python Nose runner fails if no tests were found :return: """ self.configure({ ScenarioExecutor.EXEC: { "iterations": 1, "executor": "selenium", "scenario": {"script": RESOURCES_DIR + "selenium/invalid/dummy.py"} } }) self.obj.prepare() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) self.obj.shutdown() diagnostics = "\n".join(self.obj.get_error_diagnostics()) self.assertIn("Nothing to test.", diagnostics) def test_resource_files_collection_remote_nose(self): self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}}) self.assertEqual(len(self.obj.resource_files()), 1) def test_setup_exception(self): """ Do not crash when test's setUp/setUpClass fails :return: """ self.obj.execution.merge({"scenario": { "script": RESOURCES_DIR + "selenium/python/test_setup_exception.py" }}) self.obj.engine.aggregator = FunctionalAggregator() self.obj.prepare() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) diagnostics = "\n".join(self.obj.get_error_diagnostics()) self.assertIn("Nothing to test", diagnostics) def test_long_iterations_value(self): self.obj.execution.merge({ "iterations": 2 ** 64, "scenario": { "requests": [ "http://blazedemo.com/", ], } }) self.obj.prepare() try: self.obj.startup() for _ in range(3): self.assertFalse(self.obj.check()) time.sleep(self.obj.engine.check_interval) finally: 
self.obj.shutdown() class TestNoseRunner(ExecutorTestCase): EXECUTOR = ApiritifNoseExecutor def test_full_single_script(self): self.obj.engine.check_interval = 0.1 self.obj.execution.merge({ "iterations": 1, "ramp-up": "10s", "hold-for": "10s", "steps": 5, "scenario": { "script": RESOURCES_DIR + "apiritif/test_codegen.py"}}) self.obj.prepare() self.obj.get_widget() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) def test_apiritif_generated_requests(self): self.configure({ "execution": [{ "test-mode": "apiritif", "iterations": 1, "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/", "/reserve.php"]}}]}) self.obj.prepare() self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py"))) try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertNotEquals(self.obj.process, None) def test_apiritif_transactions(self): self.configure({ "execution": [{ "test-mode": "apiritif", "iterations": 1, "scenario": { "script": RESOURCES_DIR + "apiritif/test_transactions.py" } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertNotEquals(self.obj.process, None) def test_report_reading(self): reader = FuncSamplesReader(RESOURCES_DIR + "apiritif/transactions.ldjson", self.obj.engine, self.obj.log) items = list(reader.read(last_pass=True)) self.assertEqual(9, len(items)) self.assertEqual(items[0].get_short_name(), 'TestRequests.test_1_single_request') self.assertEqual(items[1].get_short_name(), 'TestRequests.test_2_multiple_requests') self.assertEqual(items[2].get_short_name(), 'test_3_toplevel_transaction.Transaction') self.assertEqual(items[3].get_short_name(), 'test_4_mixed_transaction.Transaction') self.assertEqual(items[4].get_short_name(), 'test_5_multiple_transactions.Transaction 1') self.assertEqual(items[5].get_short_name(), 'test_5_multiple_transactions.Transaction 2') self.assertEqual(items[6].get_short_name(), 'test_6_transaction_obj.Label') self.assertEqual(items[7].get_short_name(), 'test_7_transaction_fail.Label') self.assertEqual(items[8].get_short_name(), 'test_8_transaction_attach.Label') def test_report_transactions_as_failed(self): self.configure({ "execution": [{ "test-mode": "apiritif", "iterations": 1, "scenario": { "default-address": "http://httpbin.org", "requests": [{ "label": "failure by 404", "url": "/status/404", }] } }] }) self.obj.engine.aggregator = FunctionalAggregator() self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertNotEquals(self.obj.process, None) reader = LoadSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.ldjson"), self.obj.log) samples = list(reader._read(last_pass=True)) self.assertEqual(len(samples), 1) tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname, byte_count = samples[0] self.assertIsNotNone(error) def test_status_skipped(self): self.configure({ "execution": [{ "iterations": 1, "scenario": { "script": RESOURCES_DIR + "functional/test_all.py" } }] }) self.obj.engine.aggregator = FunctionalAggregator() self.obj.prepare() try: self.obj.startup() 
while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() reader = FuncSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.ldjson"), self.obj.engine, self.obj.log) samples = list(reader.read(last_pass=True)) self.assertEqual(len(samples), 4) self.assertIsNotNone(samples[-1].status) class TestSeleniumScriptBuilder(SeleniumTestCase): def test_build_script(self): self.configure({ "execution": [{ "executor": "selenium", "hold-for": "4m", "ramp-up": "3m", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "default-address": "http://blazedemo.com", "variables": { "red_pill": "take_it", "name": "Name" }, "timeout": "3.5s", "requests": [{ "url": "/", "assert": [{ "contains": ['contained_text'], "not": True }], "actions": [ "waitByXPath(//input[@type='submit'])", "assertTitle(BlazeDemo)", "mouseMoveByXPath(/html/body/div[2]/div/p[2]/a)", "doubleClickByXPath(/html/body/div[3]/h2)", "mouseDownByXPath(/html/body/div[3]/form/select[1])", "mouseUpByXPath(/html/body/div[3]/form/select[1]/option[6])", {"selectByName(toPort)": "London"}, {"keysByCSS(body input.btn.btn-primary)": "KEY_ENTER"}, {"assertValueByID(address)": "123 Beautiful st."}, {"assertTextByXPath(/html/body/div[2]/form/div[1]/label)": "${name}"}, {"waitByName('toPort')": "visible"}, {"keysByName(\"toPort\")": "B"}, {"typeByName(\"toPort\")": "B"}, {"keysByName(\"toPort\")": u"KEY_ENTER"}, {"typeByName(\"toPort\")": "KEY_ENTER"}, "clickByXPath(//div[3]/form/select[1]//option[3])", "clickByXPath(//div[3]/form/select[2]//option[6])", "switchWindow(0)", "openWindow(some.url)", "switchWindow('win_ser_local')", "switchWindow('win_ser_1')", "switchWindow('that_window')", "closeWindow(1)", "closeWindow('win_ser_local')", "closeWindow('win_ser_1')", "closeWindow('that_window')", "submitByName(\"toPort\")", "scriptEval(\"alert('This is Sparta');\")", {"dragByID(address)": "elementByName(toPort)"}, "switchFrameByName('my_frame')", "switchFrameByIdx(1)", "switchFrame(relative=parent)", {"editContentById(editor)": "lo-la-lu"}, "pauseFor(3.5s)", "clearCookies()", "clickByLinkText(destination of the week! 
The Beach!)", {"storeTitle()": "Title"}, {"storeTextByXPath(//*[@id='basics']/h2)": "Basic"}, {"storeValueByXPath(//*[@id='basics']/h1)": "World"}, {"storeString(${Title} ${Basic} by ${By})": "Final"}, "go(http:\\blazemeter.com)", "echoString(${red_pill})", "screenshot(screen.png)", "screenshot()", ], }, {"label": "empty"} ] }, "loc_sc_remote": { "remote": "http://user:key@remote_web_driver_host:port/wd/hub", "capabilities": [ { "browser": "firefox", "version": "54.0", "platform": "linux", "javascript": "True", "os_version": "", "selenium": "", "device": "", "app": "" } ], "default-address": "http://blazedemo.com", "timeout": "3.5s", "requests": [{ "url": "/", "assert": [{ "contains": ['contained_text'], "not": True }], "actions": [ "waitByXPath(//input[@type='submit'])", "assertTitle(BlazeDemo)" ], }, {"label": "empty"} ] } } }) self.obj.prepare() self.assertFilesEqual(self.obj.script, RESOURCES_DIR + "selenium/generated_from_requests.py", (self.obj.engine.artifacts_dir + os.path.sep).replace('\\', '\\\\'), "<somewhere>") def test_headless_default(self): self.configure({ "execution": [{ "executor": "selenium", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "browser": "Chrome", "requests": ["http://blazedemo.com/"] }}}) self.obj.prepare() with open(self.obj.script) as generated: gen_contents = generated.read() self.assertNotIn("options.set_headless()", gen_contents) def test_headless_chrome(self): self.configure({ "execution": [{ "executor": "selenium", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "browser": "Chrome", "headless": True, "requests": ["http://blazedemo.com/"] }}}) self.obj.prepare() with open(self.obj.script) as generated: gen_contents = generated.read() self.assertIn("options.set_headless()", gen_contents) def test_headless_firefox(self): self.configure({ "execution": [{ "executor": "selenium", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "browser": "Firefox", "headless": True, "requests": ["http://blazedemo.com/"] }}}) self.obj.prepare() with open(self.obj.script) as generated: gen_contents = generated.read() self.assertIn("options.set_headless()", gen_contents) def test_headless_safari(self): self.configure({ "execution": [{ "executor": "selenium", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "browser": "Opera", "headless": True, "requests": ["http://blazedemo.com/"] }}}) self.obj.prepare() with open(self.obj.script) as generated: gen_contents = generated.read() self.assertNotIn("options.set_headless()", gen_contents) def test_build_script_remote(self): self.configure({ "execution": [{ "executor": "selenium", "hold-for": "4m", "ramp-up": "3m", "scenario": "loc_sc_remote"}], "scenarios": { "loc_sc_remote": { "remote": "http://user:key@remote_web_driver_host:port/wd/hub", "capabilities": [ { "browser": "firefox", "version": "54.0", "platform": "linux", "javascript": "True", "os_version": "", "selenium": "", "device": "", "app": "" } ], "default-address": "http://blazedemo.com", "timeout": "3.5s", "requests": [{ "url": "/", "assert": [{ "contains": ['contained_text'], "not": True }], "actions": [ "waitByXPath(//input[@type='submit'])", "assertTitle(BlazeDemo)" ], }, {"label": "empty"} ] } } }) self.obj.prepare() self.assertFilesEqual(self.obj.script, RESOURCES_DIR + "selenium/generated_from_requests_remote.py") def test_build_script_appium_browser(self): self.configure({ "execution": [{ "executor": "selenium", "hold-for": "4m", "ramp-up": "3m", "scenario": "loc_sc_appium"}], "scenarios": { "loc_sc_appium": { "browser": "Chrome-Android", "capabilities": [ 
{ "device": "", } ], "default-address": "http://blazedemo.com", "timeout": "3.5s", "requests": [{ "url": "/", "assert": [{ "contains": ['contained_text'], "not": True }], "actions": [ "waitByXPath(//input[@type='submit'])", "assertTitle(BlazeDemo)" ], }, {"label": "empty"} ] } } }) self.obj.prepare() self.assertFilesEqual(self.obj.script, RESOURCES_DIR + "selenium/generated_from_requests_appium_browser.py") def test_build_script_flow_markers(self): self.configure({ "execution": [{ "executor": "selenium", "hold-for": "4m", "ramp-up": "3m", "scenario": "loc_sc"}], "scenarios": { "loc_sc": { "generate-flow-markers": True, "browser": "Chrome", "default-address": "http://blazedemo.com", "timeout": "3.5s", "requests": [{ "url": "/", "assert": [{ "contains": ['contained_text'], "not": True }], "actions": [ "waitByXPath(//input[@type='submit'])", "assertTitle(BlazeDemo)" ], }, {"label": "empty"} ] } } }) self.obj.prepare() self.assertFilesEqual(self.obj.script, RESOURCES_DIR + "selenium/generated_from_requests_flow_markers.py", (self.obj.engine.artifacts_dir + os.path.sep).replace('\\', '\\\\'), "<somewhere>") class TestApiritifScriptGenerator(ExecutorTestCase): EXECUTOR = ApiritifNoseExecutor def test_keepalive_default(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("target.keep_alive(True)", test_script) def test_keepalive(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "keepalive": False, "requests": [ "/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("target.keep_alive(False)", test_script) def test_timeout_default(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertNotIn("timeout=30.0", test_script) def test_timeout(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "timeout": "10s", "default-address": "http://blazedemo.com", "requests": [ "/?tag=1", { "url": "/?tag=2", "timeout": "2s", } ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("target.timeout(10.0)", test_script) self.assertNotIn("get('/?tag=1', timeout=10.0", test_script) self.assertIn("get('/?tag=2', timeout=2.0", test_script) def test_timeout_notarget(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "timeout": "10s", "requests": [ "http://blazedemo.com/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("get('http://blazedemo.com/', timeout=10.0", test_script) def test_think_time(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ { "url": "/?tag=2", "think-time": "1s500ms", } ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("time.sleep(1.5)", test_script) def test_methods(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ {"url": "/?tag=get", "method": "GET"}, {"url": "/?tag=post", "method": "POST"}, {"url": "/?tag=put", "method": "PUT"}, 
{"url": "/?tag=patch", "method": "PATCH"}, {"url": "/?tag=head", "method": "HEAD"}, {"url": "/?tag=delete", "method": "DELETE"}, ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("get('/?tag=get'", test_script) self.assertIn("post('/?tag=post'", test_script) self.assertIn("put('/?tag=put'", test_script) self.assertIn("patch('/?tag=patch'", test_script) self.assertIn("head('/?tag=head'", test_script) self.assertIn("delete('/?tag=delete'", test_script) def test_default_address_path_prefix(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "https://a.blazemeter.com", "base-path": "/api/latest", "requests": [ "/user", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("target('https://a.blazemeter.com')", test_script) self.assertIn("target.base_path('/api/latest')", test_script) def test_headers(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "headers": {"X-Foo": "foo"}, "requests": [{ "url": "/", "headers": {"X-Bar": "bar"} }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("'X-Foo': 'foo'", test_script) self.assertIn("'X-Bar': 'bar'", test_script) def test_follow_redirects_default(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("target.allow_redirects(True)", test_script) self.assertNotIn("allow_redirects=True", test_script) def test_follow_redirects(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "follow-redirects": False, }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("allow_redirects=False", test_script) def test_body_params(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "body": { "foo": "bar", }, }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("params={\n 'foo': 'bar',\n }", test_script) def test_body_json(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "headers": { "Content-Type": "application/json", }, "body": { "foo": "bar", }, }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("json={\n 'foo': 'bar',\n }", test_script) def test_body_string(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "body": "MY PERFECT BODY" }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("data='MY PERFECT BODY'", test_script) def test_body_unknown(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "body": 123 }] } }] }) self.assertRaises(TaurusConfigError, self.obj.prepare) def test_plain_assertions(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", 
"requests": [{ "url": "/", "assert": [ "Welcome", "Simple Travel Agency" ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("response.assert_regex_in_body('Welcome')", test_script) self.assertIn("response.assert_regex_in_body('Simple Travel Agency')", test_script) def test_plain_assertion_kinds(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [{ "url": "/", "assert": [ {"contains": ["1"], "regexp": False, "not": False}, {"contains": ["2"], "regexp": False, "not": True}, {"contains": ["3"], "regexp": True, "not": False}, {"contains": ["4"], "regexp": True, "not": True}, {"contains": ["5"], "regexp": False, "not": False, "subject": "headers"}, {"contains": ["6"], "regexp": False, "not": True, "subject": "headers"}, {"contains": ["7"], "regexp": True, "not": False, "subject": "headers"}, {"contains": ["8"], "regexp": True, "not": True, "subject": "headers"}, {"contains": ["8"], "regexp": True, "not": True, "subject": "headers"}, {"contains": ["9"], "not": False, "subject": "http-code"}, {"contains": ["10"], "not": True, "subject": "http-code"}, ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("assert_in_body('1')", test_script) self.assertIn("assert_not_in_body('2')", test_script) self.assertIn("assert_regex_in_body('3')", test_script) self.assertIn("assert_regex_not_in_body('4')", test_script) self.assertIn("assert_in_headers('5')", test_script) self.assertIn("assert_not_in_headers('6')", test_script) self.assertIn("assert_regex_in_headers('7')", test_script) self.assertIn("assert_regex_not_in_headers('8')", test_script) self.assertIn("assert_status_code('9')", test_script) self.assertIn("assert_not_status_code('10')", test_script) def test_jsonpath_assertions(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "https://api.github.com", "requests": [{ "url": "/", "assert-jsonpath": [ "$.foo.bar" ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("assert_jsonpath('$.foo.bar', expected_value=None)", test_script) def test_jsonpath_assertions_kinds(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "https://api.github.com", "requests": [{ "url": "/", "assert-jsonpath": [ {"jsonpath": "$.1", "invert": False}, {"jsonpath": "$.2", "invert": True}, {"jsonpath": "$.3", "expected-value": "value"}, ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("assert_jsonpath('$.1', expected_value=None)", test_script) self.assertIn("assert_not_jsonpath('$.2', expected_value=None)", test_script) self.assertIn("assert_jsonpath('$.3', expected_value='value')", test_script) def test_xpath_assertions(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "https://api.github.com", "requests": [{ "url": "/", "assert-xpath": [ "//head/title" ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("assert_xpath('//head/title', parser_type='html', validate=False)", test_script) def test_xpath_assertions_kinds(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "https://api.github.com", "requests": [{ "url": "/", "assert-xpath": [ {"xpath": "//1", "invert": False}, {"xpath": "//2", "invert": 
True}, {"xpath": "//3", "validate-xml": True}, {"xpath": "//4", "validate-xml": False, "use-tolerant-parser": False}, ] }] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.assertIn("assert_xpath('//1', parser_type='html', validate=False)", test_script) self.assertIn("assert_not_xpath('//2', parser_type='html', validate=False)", test_script) self.assertIn("assert_xpath('//3', parser_type='html', validate=True)", test_script) self.assertIn("assert_xpath('//4', parser_type='xml', validate=False)", test_script) def test_complex_codegen(self): """ This test serves code review purposes, to make changes more visible """ self.obj.engine.config.load([RESOURCES_DIR + 'apiritif/test_codegen.yml']) self.configure(self.obj.engine.config['execution'][0]) self.obj.settings['verbose'] = True self.obj.prepare() exp_file = RESOURCES_DIR + 'apiritif/test_codegen.py' # import shutil; shutil.copy2(self.obj.script, exp_file) # keep this comment to ease updates self.assertFilesEqual(self.obj.script, exp_file) def test_jmeter_functions_time(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/?time=${__time()}", "/?time=${__time(MM/dd/yy)}", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("'/?time={}'.format(apiritif.format_date())", test_script) self.assertIn("'/?time={}'.format(apiritif.format_date('MM/dd/yy'))", test_script) def test_jmeter_functions_random(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/?random=${__Random(1, 10)}", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("'/?random={}'.format(apiritif.random_uniform(1, 10))", test_script) def test_jmeter_functions_random_string(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/?rs=${__RandomString(3)}", "/?rs=${__RandomString(4,abcdef)}", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("'/?rs={}'.format(apiritif.random_string(3))", test_script) self.assertIn("'/?rs={}'.format(apiritif.random_string(4, 'abcdef'))", test_script) def test_jmeter_functions_base64_encode(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "headers": { "Authorization": "Basic ${__base64Encode(user:pass)}", }, "requests": [ "/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("base64_encode('user:pass')", test_script) def test_jmeter_functions_base64_decode(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "headers": { "Additional": "${__base64Decode(dGVzdCBzdHJpbmc=)}", }, "requests": [ "/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("base64_decode('dGVzdCBzdHJpbmc=')", test_script) def test_jmeter_functions_urlencode(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/${__urlencode(Foo Bar Baz)}", ] } }] }) 
self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("encode_url('Foo Bar Baz')", test_script) def test_jmeter_functions_uuid(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "requests": [ "/${__UUID()}", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("uuid()", test_script) def test_load_reader(self): reader = ApiritifLoadReader(self.obj.log) # add empty reader with tempfile.NamedTemporaryFile() as f_name: reader.register_file(f_name.name) items = list(reader._read()) self.assertEqual(len(items), 0) self.assertFalse(reader.read_records) reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl") items = list(reader._read()) self.assertEqual(len(items), 2) reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl") reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl") items = list(reader._read()) self.assertTrue(reader.read_records) self.assertEqual(len(items), 4) def test_func_reader(self): reader = ApiritifFuncReader(self.obj.engine, self.obj.log) items = list(reader.read()) self.assertEqual(len(items), 0) reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson") reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson") items = list(reader.read()) self.assertEqual(len(items), 18) def test_codegen_requests(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "requests": [{ "url": "http://localhost:8000/", "label": "apiritif", }] } }] }) self.obj.prepare() self.assertFilesEqual(self.obj.script, RESOURCES_DIR + "/apiritif/test_codegen_requests.py") def test_generator_crash(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "default-address": "http://blazedemo.com", "variables": { "product_id": "5b6c", }, "requests": [{ "url": "/", "method": "POST", "body": { "product": "${product_id}" # notice the space } }] } }] }) self.obj.prepare() # Unparser shouldn't crash with AttributeError because of malformed AST with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("data=[('product', vars['product_id'])]", test_script) def test_inherit_test_case(self): self.configure({ "execution": [{ "test-mode": "apiritif", "scenario": { "requests": [ "http://example.com/", ] } }] }) self.obj.prepare() with open(self.obj.script) as fds: test_script = fds.read() self.obj.log.info(test_script) self.assertIn("class TestAPI(unittest.TestCase)", test_script) class TestPyTestExecutor(ExecutorTestCase): EXECUTOR = PyTestExecutor def test_full_single_script(self): self.obj.execution.merge({ "iterations": 1, "scenario": { "script": RESOURCES_DIR + "selenium/pytest/test_statuses.py" } }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) def test_statuses(self): self.obj.execution.merge({ "scenario": { "script": RESOURCES_DIR + "selenium/pytest/test_statuses.py" } }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() with open(self.obj.report_file) as fds: report = [json.loads(line) for line in fds.readlines() if line] self.assertEqual(4, 
len(report)) self.assertEqual(["PASSED", "FAILED", "FAILED", "SKIPPED"], [item["status"] for item in report]) failed_item = report[1] assertions = failed_item["assertions"] self.assertEqual(1, len(assertions)) assertion = assertions[0] self.assertEqual('assert (2 + (2 * 2)) == 8', assertion['error_msg']) self.assertTrue(assertion['failed']) self.assertEqual('AssertionError: assert (2 + (2 * 2)) == 8', assertion['name']) self.assertIsNotNone(assertion.get('error_trace')) def test_iterations(self): self.obj.execution.merge({ "iterations": 10, "scenario": { "script": RESOURCES_DIR + "selenium/pytest/test_single.py" } }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() with open(self.obj.report_file) as fds: report = [json.loads(line) for line in fds.readlines() if line] self.assertEqual(10, len(report)) self.assertTrue(all(item["status"] == "PASSED" for item in report)) def test_hold(self): self.obj.execution.merge({ "hold-for": "3s", "scenario": { "script": RESOURCES_DIR + "selenium/pytest/test_single.py" } }) self.obj.prepare() try: start_time = time.time() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() end_time = time.time() self.obj.post_process() duration = end_time - start_time self.assertGreaterEqual(duration, 3.0) def test_blazedemo(self): self.obj.engine.check_interval = 0.1 self.obj.execution.merge({ "scenario": { "script": RESOURCES_DIR + "selenium/pytest/test_blazedemo.py" } }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() with open(self.obj.report_file) as fds: report = [json.loads(line) for line in fds.readlines() if line] self.assertEqual(2, len(report)) def test_package(self): self.obj.engine.check_interval = 0.1 self.obj.execution.merge({ "scenario": { "script": RESOURCES_DIR + "selenium/pytest/" } }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() with open(self.obj.report_file) as fds: report = [json.loads(line) for line in fds.readlines() if line] self.assertEqual(7, len(report)) def test_additional_args(self): additional_args = "--foo --bar" self.obj.execution.merge({ "scenario": { "additional-args": additional_args, "script": RESOURCES_DIR + "selenium/pytest/test_single.py" } }) self.obj.runner_path = RESOURCES_DIR + "selenium/pytest/bin/runner.py" self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() with open(self.obj.stdout.name) as fds: stdout = fds.read() self.assertIn(additional_args, stdout) class TestRobotExecutor(ExecutorTestCase): EXECUTOR = RobotExecutor def test_full_single_script(self): self.configure({ "execution": [{ "scenario": { "script": RESOURCES_DIR + "selenium/robot/simple/test.robot" } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) lines = open(self.obj.report_file).readlines() self.assertEqual(5, len(lines)) def test_hold(self): self.configure({ "execution": [{ "hold-for": "5s", "scenario": { "script": RESOURCES_DIR + 
"selenium/robot/simple/test.robot" } }] }) self.obj.prepare() try: start_time = time.time() self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertTrue(os.path.exists(self.obj.report_file)) duration = time.time() - start_time self.assertGreater(duration, 5) def test_iterations(self): self.configure({ "execution": [{ "iterations": 3, "scenario": { "script": RESOURCES_DIR + "selenium/robot/simple/test.robot" } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) lines = open(self.obj.report_file).readlines() self.assertEqual(3 * 5, len(lines)) def test_variables(self): self.configure({ "execution": [{ "iterations": 1, "scenario": { "variables": { "USERNAME": "janedoe", }, "script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot", } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line] self.obj.log.info(samples) self.assertEqual(5, len(samples)) self.assertTrue(all(sample["status"] == "PASSED" for sample in samples)) def test_variables_file(self): self.configure({ "execution": [{ "iterations": 1, "scenario": { "variables": RESOURCES_DIR + "selenium/robot/simple/vars.yaml", "script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot", } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line] self.obj.log.info(samples) self.assertEqual(5, len(samples)) self.assertTrue(all(sample["status"] == "PASSED" for sample in samples)) def test_single_tag(self): self.configure({ "execution": [{ "iterations": 1, "scenario": { "tags": "create", "script": RESOURCES_DIR + "selenium/robot/simple/test.robot", } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line] self.obj.log.info(samples) self.assertEqual(1, len(samples)) self.assertTrue(all(sample["status"] == "PASSED" for sample in samples)) def test_multiple_tags(self): self.configure({ "execution": [{ "iterations": 1, "scenario": { "tags": "create,database", "script": RESOURCES_DIR + "selenium/robot/simple/test.robot", } }] }) self.obj.prepare() try: self.obj.startup() while not self.obj.check(): time.sleep(self.obj.engine.check_interval) finally: self.obj.shutdown() self.obj.post_process() self.assertFalse(self.obj.has_results()) self.assertNotEquals(self.obj.process, None) samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line] self.obj.log.info(samples) self.assertEqual(2, len(samples)) self.assertTrue(all(sample["status"] == "PASSED" for sample in 
samples))
1
15,285
let's replace this example with something that won't fail in Python. Like "for+if+print"
Blazemeter-taurus
py
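The review message in the record above asks for the failing Python example to be replaced with something simple along the lines of "for + if + print". A minimal sketch of such a snippet is shown below; the list contents and variable names are illustrative assumptions, not taken from the original patch.

```python
# Minimal "for + if + print" example of the kind the reviewer suggests.
# Values and names here are placeholders, not from the patch under review.
numbers = [1, 2, 3, 4, 5]
for n in numbers:
    if n % 2 == 0:
        print("even:", n)
```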
@@ -268,7 +268,8 @@ def _to_graphviz(tree_info, show_info, feature_names, precision=None, raise ImportError('You must install graphviz to plot tree.') def float2str(value, precision=None): - return "{0:.{1}f}".format(value, precision) if precision is not None else str(value) + return "{0:.{1}f}".format(value, precision) \ + if (precision is not None) and (not isinstance(value, str)) else str(value) def add(root, parent=None, decision=None): """recursively add node or edge"""
1
# coding: utf-8 # pylint: disable = C0103 """Plotting Library.""" from __future__ import absolute_import import warnings from copy import deepcopy from io import BytesIO import numpy as np from .basic import Booster from .compat import MATPLOTLIB_INSTALLED, GRAPHVIZ_INSTALLED from .sklearn import LGBMModel def check_not_tuple_of_2_elements(obj, obj_name='obj'): """check object is not tuple or does not have 2 elements""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name) def plot_importance(booster, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', xlabel='Feature importance', ylabel='Features', importance_type='split', max_num_features=None, ignore_zero=True, figsize=None, grid=True, **kwargs): """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. **kwargs : other parameters Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot importance.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip(*tuples) if ax is None: if figsize is not None: check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip(values, ylocs): ax.text(x + 1, y, x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def plot_metric(booster, metric=None, dataset_names=None, ax=None, xlim=None, ylim=None, title='Metric during training', xlabel='Iterations', ylabel='auto', figsize=None, grid=True): """Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : string or None, optional (default=None) The metric name to plot. Only one metric supported because different metrics have various scales. If None, first metric picked from dictionary (according to hashcode). dataset_names : list of strings or None, optional (default=None) List of the dataset names which are used to calculate metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Iterations") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="auto") Y-axis title label. If 'auto', metric name is used. If None, title is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with metric's history over the training. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot metric.') if isinstance(booster, LGBMModel): eval_results = deepcopy(booster.evals_result_) elif isinstance(booster, dict): eval_results = deepcopy(booster) else: raise TypeError('booster must be dict or LGBMModel.') num_data = len(eval_results) if not num_data: raise ValueError('eval results cannot be empty.') if ax is None: if figsize is not None: check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) if dataset_names is None: dataset_names = iter(eval_results.keys()) elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names: raise ValueError('dataset_names should be iterable and cannot be empty') else: dataset_names = iter(dataset_names) name = next(dataset_names) # take one as sample metrics_for_one = eval_results[name] num_metric = len(metrics_for_one) if metric is None: if num_metric > 1: msg = """more than one metric available, picking one to plot.""" warnings.warn(msg, stacklevel=2) metric, results = metrics_for_one.popitem() else: if metric not in metrics_for_one: raise KeyError('No given metric in eval results.') results = metrics_for_one[metric] num_iteration, max_result, min_result = len(results), max(results), min(results) x_ = range(num_iteration) ax.plot(x_, results, label=name) for name in dataset_names: metrics_for_one = eval_results[name] results = metrics_for_one[metric] max_result, min_result = max(max(results), max_result), min(min(results), min_result) ax.plot(x_, results, label=name) ax.legend(loc='best') if xlim is not None: check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, num_iteration) ax.set_xlim(xlim) if ylim is not None: check_not_tuple_of_2_elements(ylim, 'ylim') else: range_result = max_result - min_result ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2) ax.set_ylim(ylim) if ylabel == 'auto': ylabel = metric if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def _to_graphviz(tree_info, show_info, feature_names, precision=None, name=None, comment=None, filename=None, directory=None, format=None, engine=None, encoding=None, graph_attr=None, node_attr=None, edge_attr=None, body=None, strict=False): """Convert specified tree to graphviz instance. 
See: - http://graphviz.readthedocs.io/en/stable/api.html#digraph """ if GRAPHVIZ_INSTALLED: from graphviz import Digraph else: raise ImportError('You must install graphviz to plot tree.') def float2str(value, precision=None): return "{0:.{1}f}".format(value, precision) if precision is not None else str(value) def add(root, parent=None, decision=None): """recursively add node or edge""" if 'split_index' in root: # non-leaf name = 'split{0}'.format(root['split_index']) if feature_names is not None: label = 'split_feature_name: {0}'.format(feature_names[root['split_feature']]) else: label = 'split_feature_index: {0}'.format(root['split_feature']) label += r'\nthreshold: {0}'.format(float2str(root['threshold'], precision)) for info in show_info: if info in {'split_gain', 'internal_value'}: label += r'\n{0}: {1}'.format(info, float2str(root[info], precision)) elif info == 'internal_count': label += r'\n{0}: {1}'.format(info, root[info]) graph.node(name, label=label) if root['decision_type'] == '<=': l_dec, r_dec = '<=', '>' elif root['decision_type'] == '==': l_dec, r_dec = 'is', "isn't" else: raise ValueError('Invalid decision type in tree model.') add(root['left_child'], name, l_dec) add(root['right_child'], name, r_dec) else: # leaf name = 'leaf{0}'.format(root['leaf_index']) label = 'leaf_index: {0}'.format(root['leaf_index']) label += r'\nleaf_value: {0}'.format(float2str(root['leaf_value'], precision)) if 'leaf_count' in show_info: label += r'\nleaf_count: {0}'.format(root['leaf_count']) graph.node(name, label=label) if parent is not None: graph.edge(parent, name, decision) graph = Digraph(name=name, comment=comment, filename=filename, directory=directory, format=format, engine=engine, encoding=encoding, graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr, body=body, strict=strict) add(tree_info['tree_structure']) return graph def create_tree_digraph(booster, tree_index=0, show_info=None, precision=None, name=None, comment=None, filename=None, directory=None, format=None, engine=None, encoding=None, graph_attr=None, node_attr=None, edge_attr=None, body=None, strict=False): """Create a digraph representation of specified tree. Note ---- For more information please visit http://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. name : string or None, optional (default=None) Graph name used in the source code. comment : string or None, optional (default=None) Comment added to the first line of the source. filename : string or None, optional (default=None) Filename for saving the source. If None, ``name`` + '.gv' is used. directory : string or None, optional (default=None) (Sub)directory for source saving and rendering. format : string or None, optional (default=None) Rendering output format ('pdf', 'png', ...). engine : string or None, optional (default=None) Layout command used ('dot', 'neato', ...). encoding : string or None, optional (default=None) Encoding for saving the source. 
graph_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for the graph. All attributes and values must be strings or bytes-like objects. node_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for all nodes. All attributes and values must be strings or bytes-like objects. edge_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for all edges. All attributes and values must be strings or bytes-like objects. body : list of strings or None, optional (default=None) Lines to add to the graph body. strict : bool, optional (default=False) Whether rendering should merge multi-edges. Returns ------- graph : graphviz.Digraph The digraph representation of specified tree. """ if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') model = booster.dump_model() tree_infos = model['tree_info'] if 'feature_names' in model: feature_names = model['feature_names'] else: feature_names = None if tree_index < len(tree_infos): tree_info = tree_infos[tree_index] else: raise IndexError('tree_index is out of range.') if show_info is None: show_info = [] graph = _to_graphviz(tree_info, show_info, feature_names, precision, name=name, comment=comment, filename=filename, directory=directory, format=format, engine=engine, encoding=encoding, graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr, body=body, strict=strict) return graph def plot_tree(booster, ax=None, tree_index=0, figsize=None, graph_attr=None, node_attr=None, edge_attr=None, show_info=None, precision=None): """Plot specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality and returned objects can be also rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. graph_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for the graph. All attributes and values must be strings or bytes-like objects. node_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for all nodes. All attributes and values must be strings or bytes-like objects. edge_attr : dict, list of tuples or None, optional (default=None) Mapping of (attribute, value) pairs set for all edges. All attributes and values must be strings or bytes-like objects. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. Returns ------- ax : matplotlib.axes.Axes The plot with single tree. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt import matplotlib.image as image else: raise ImportError('You must install matplotlib to plot tree.') if ax is None: if figsize is not None: check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) graph = create_tree_digraph( booster=booster, tree_index=tree_index, show_info=show_info, precision=precision, graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr ) s = BytesIO() s.write(graph.pipe(format='png')) s.seek(0) img = image.imread(s) ax.imshow(img) ax.axis('off') return ax
1
18,505
Please remove the brackets and use `string_type` from `compat.py` module instead of `str` in `isinstance()`.
microsoft-LightGBM
cpp
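The review above asks the patch author to drop the added parentheses and to test against `string_type` from the project's `compat.py` module instead of the built-in `str`. A hedged sketch of how the revised `float2str` helper might look, assuming `string_type` is importable from `.compat` inside the package as the reviewer implies:

```python
from .compat import string_type  # assumed available per the reviewer's comment

def float2str(value, precision=None):
    # Format numeric values to the requested precision; pass string values through unchanged.
    return "{0:.{1}f}".format(value, precision) \
        if precision is not None and not isinstance(value, string_type) else str(value)
```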
@@ -36,16 +36,11 @@ type Execution struct { } // NewExecution returns a Execution instance -func NewExecution(executorAddress string, contractAddress string, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) (*Execution, error) { - if executorAddress == "" { - return nil, errors.Wrap(ErrAddress, "address of the executor is empty") - } - +func NewExecution(contractAddress string, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) (*Execution, error) { return &Execution{ AbstractAction: AbstractAction{ version: version.ProtocolVersion, nonce: nonce, - srcAddr: executorAddress, dstAddr: contractAddress, gasLimit: gasLimit, gasPrice: gasPrice,
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "math" "math/big" "github.com/golang/protobuf/proto" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/util/byteutil" "github.com/iotexproject/iotex-core/pkg/version" iproto "github.com/iotexproject/iotex-core/proto" ) const ( // EmptyAddress is the empty string EmptyAddress = "" // ExecutionDataGas represents the execution data gas per uint ExecutionDataGas = uint64(100) // ExecutionBaseIntrinsicGas represents the base intrinsic gas for execution ExecutionBaseIntrinsicGas = uint64(10000) ) // Execution defines the struct of account-based contract execution type Execution struct { AbstractAction amount *big.Int data []byte } // NewExecution returns a Execution instance func NewExecution(executorAddress string, contractAddress string, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) (*Execution, error) { if executorAddress == "" { return nil, errors.Wrap(ErrAddress, "address of the executor is empty") } return &Execution{ AbstractAction: AbstractAction{ version: version.ProtocolVersion, nonce: nonce, srcAddr: executorAddress, dstAddr: contractAddress, gasLimit: gasLimit, gasPrice: gasPrice, }, amount: amount, data: data, }, nil } // Executor returns an executor address func (ex *Execution) Executor() string { return ex.SrcAddr() } // ExecutorPublicKey returns the executor's public key func (ex *Execution) ExecutorPublicKey() keypair.PublicKey { return ex.SrcPubkey() } // Contract returns a contract address func (ex *Execution) Contract() string { return ex.DstAddr() } // Amount returns the amount func (ex *Execution) Amount() *big.Int { return ex.amount } // Data returns the data bytes func (ex *Execution) Data() []byte { return ex.data } // TotalSize returns the total size of this Execution func (ex *Execution) TotalSize() uint32 { size := ex.BasicActionSize() if ex.amount != nil && len(ex.amount.Bytes()) > 0 { size += uint32(len(ex.amount.Bytes())) } return size + uint32(len(ex.data)) } // ByteStream returns a raw byte stream of this Transfer func (ex *Execution) ByteStream() []byte { return byteutil.Must(proto.Marshal(ex.Proto())) } // Proto converts Execution to protobuf's ExecutionPb func (ex *Execution) Proto() *iproto.ExecutionPb { act := &iproto.ExecutionPb{ Contract: ex.dstAddr, Data: ex.data, } if ex.amount != nil && len(ex.amount.Bytes()) > 0 { act.Amount = ex.amount.Bytes() } return act } // LoadProto converts a protobuf's ExecutionPb to Execution func (ex *Execution) LoadProto(pbAct *iproto.ExecutionPb) error { if pbAct == nil { return errors.New("empty action proto to load") } if ex == nil { return errors.New("nil action to load proto") } *ex = Execution{} ex.data = pbAct.GetData() ex.amount = &big.Int{} ex.amount.SetBytes(pbAct.GetAmount()) return nil } // IntrinsicGas returns the intrinsic gas of an execution func (ex *Execution) IntrinsicGas() (uint64, error) { dataSize := uint64(len(ex.Data())) if (math.MaxUint64-ExecutionBaseIntrinsicGas)/ExecutionDataGas < dataSize { return 0, ErrOutOfGas } return dataSize*ExecutionDataGas + 
ExecutionBaseIntrinsicGas, nil } // Cost returns the cost of an execution func (ex *Execution) Cost() (*big.Int, error) { maxExecFee := big.NewInt(0).Mul(ex.GasPrice(), big.NewInt(0).SetUint64(ex.GasLimit())) return big.NewInt(0).Add(ex.Amount(), maxExecFee), nil }
1
15,029
line is 143 characters (from `lll`)
iotexproject-iotex-core
go
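The review above is a line-length finding from the `lll` linter: even after removing the executor address, the new `NewExecution` signature exceeds the configured limit. One straightforward way to satisfy the linter, sketched below from the patch's own body, is to wrap the parameter list; the exact formatting is a suggestion, not necessarily the project's actual fix.

```go
// NewExecution returns an Execution instance.
// Parameters are wrapped onto separate lines to keep each line under the lll limit.
func NewExecution(
	contractAddress string,
	nonce uint64,
	amount *big.Int,
	gasLimit uint64,
	gasPrice *big.Int,
	data []byte,
) (*Execution, error) {
	return &Execution{
		AbstractAction: AbstractAction{
			version:  version.ProtocolVersion,
			nonce:    nonce,
			dstAddr:  contractAddress,
			gasLimit: gasLimit,
			gasPrice: gasPrice,
		},
		amount: amount,
		data:   data,
	}, nil
}
```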
@@ -94,7 +94,11 @@ func NewController(params Params) (types.Controller, error) { for i := 0; i < items.Len(); i++ { item := items.Index(i).Addr().Interface().(InnerObjectWithSelector) for _, record := range item.GetStatus().Experiment.Records { - if controller.ParseNamespacedName(record.Id) == objName { + namespacedName, err := controller.ParseNamespacedName(record.Id) + if err != nil { + setupLog.Error(err, "failed to parse record", "record", record.Id) + } + if namespacedName == objName { id := k8sTypes.NamespacedName{ Namespace: item.GetObjectMeta().Namespace, Name: item.GetObjectMeta().Name,
1
// Copyright 2021 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package common import ( "context" "reflect" "github.com/go-logr/logr" "go.uber.org/fx" "k8s.io/apimachinery/pkg/runtime" k8sTypes "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/chaos-mesh/chaos-mesh/controllers/types" "github.com/chaos-mesh/chaos-mesh/controllers/utils/builder" "github.com/chaos-mesh/chaos-mesh/controllers/utils/controller" "github.com/chaos-mesh/chaos-mesh/controllers/utils/recorder" "github.com/chaos-mesh/chaos-mesh/pkg/selector" ) type ChaosImplPair struct { Name string Object InnerObjectWithSelector Impl ChaosImpl ObjectList runtime.Object Controlls []runtime.Object } type Params struct { fx.In Mgr ctrl.Manager Client client.Client Logger logr.Logger Selector *selector.Selector RecorderBuilder *recorder.RecorderBuilder Impls []*ChaosImplPair `group:"impl"` Reader client.Reader `name:"no-cache"` } func NewController(params Params) (types.Controller, error) { logger := params.Logger pairs := params.Impls mgr := params.Mgr client := params.Client reader := params.Reader selector := params.Selector recorderBuilder := params.RecorderBuilder setupLog := logger.WithName("setup-common") for _, pair := range pairs { setupLog.Info("setting up controller", "resource-name", pair.Name) builder := builder.Default(mgr). For(pair.Object). Named(pair.Name + "-records") // Add owning resources if len(pair.Controlls) > 0 { pair := pair for _, obj := range pair.Controlls { builder = builder.Watches(&source.Kind{Type: obj}, &handler.EnqueueRequestsFromMapFunc{ ToRequests: handler.ToRequestsFunc(func(obj handler.MapObject) []reconcile.Request { reqs := []reconcile.Request{} objName := k8sTypes.NamespacedName{ Namespace: obj.Meta.GetNamespace(), Name: obj.Meta.GetName(), } list := pair.ObjectList.DeepCopyObject() err := client.List(context.TODO(), list) if err != nil { setupLog.Error(err, "fail to list object") } items := reflect.ValueOf(list).Elem().FieldByName("Items") for i := 0; i < items.Len(); i++ { item := items.Index(i).Addr().Interface().(InnerObjectWithSelector) for _, record := range item.GetStatus().Experiment.Records { if controller.ParseNamespacedName(record.Id) == objName { id := k8sTypes.NamespacedName{ Namespace: item.GetObjectMeta().Namespace, Name: item.GetObjectMeta().Name, } setupLog.Info("mapping requests", "source", objName, "target", id) reqs = append(reqs, reconcile.Request{ NamespacedName: id, }) } } } return reqs }), }) } } err := builder.Complete(&Reconciler{ Impl: pair.Impl, Object: pair.Object, Client: client, Reader: reader, Recorder: recorderBuilder.Build("records"), Selector: selector, Log: logger.WithName("records"), }) if err != nil { return "", err } } return "records", nil }
1
23,523
It should follow a `continue`.
chaos-mesh-chaos-mesh
go
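The review above points out that merely logging the parse error is not enough: the loop should skip the malformed record, i.e. the `setupLog.Error` call should be followed by a `continue`. A sketch of the corrected loop body, based on the patch and surrounding code in the record above (it still relies on the enclosing function's `item`, `objName`, and `reqs`):

```go
for _, record := range item.GetStatus().Experiment.Records {
	namespacedName, err := controller.ParseNamespacedName(record.Id)
	if err != nil {
		setupLog.Error(err, "failed to parse record", "record", record.Id)
		continue // skip records whose id cannot be parsed, as the reviewer suggests
	}
	if namespacedName == objName {
		id := k8sTypes.NamespacedName{
			Namespace: item.GetObjectMeta().Namespace,
			Name:      item.GetObjectMeta().Name,
		}
		setupLog.Info("mapping requests", "source", objName, "target", id)
		reqs = append(reqs, reconcile.Request{NamespacedName: id})
	}
}
```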
@@ -18,6 +18,7 @@ services: gid: '{{ .GID }}' image: ${DDEV_DBIMAGE}-${DDEV_SITENAME}-built stop_grace_period: 60s + working_dir: "{{ .WebWorkingDir }}" volumes: - type: "volume" source: mariadb-database
1
package ddevapp // DDevComposeTemplate is used to create the main docker-compose file // file for a ddev project. const DDevComposeTemplate = `version: '{{ .ComposeVersion }}' {{ .DdevGenerated }} services: {{if not .OmitDB }} db: container_name: {{ .Plugin }}-${DDEV_SITENAME}-db build: context: '{{ .DBBuildContext }}' dockerfile: '{{ .DBBuildDockerfile }}' args: BASE_IMAGE: $DDEV_DBIMAGE username: '{{ .Username }}' uid: '{{ .UID }}' gid: '{{ .GID }}' image: ${DDEV_DBIMAGE}-${DDEV_SITENAME}-built stop_grace_period: 60s volumes: - type: "volume" source: mariadb-database target: "/var/lib/mysql" volume: nocopy: true - type: "bind" source: "." target: "/mnt/ddev_config" - ddev-global-cache:/mnt/ddev-global-cache restart: "{{ if .AutoRestartContainers }}always{{ else }}no{{ end }}" user: "$DDEV_UID:$DDEV_GID" hostname: {{ .Name }}-db ports: - "{{ .DockerIP }}:$DDEV_HOST_DB_PORT:3306" labels: com.ddev.site-name: ${DDEV_SITENAME} com.ddev.platform: {{ .Plugin }} com.ddev.app-type: {{ .AppType }} com.ddev.approot: $DDEV_APPROOT environment: - COLUMNS - DDEV_HOSTNAME - DDEV_PHP_VERSION - DDEV_PRIMARY_URL - DDEV_PROJECT - DDEV_PROJECT_TYPE - DDEV_ROUTER_HTTP_PORT - DDEV_ROUTER_HTTPS_PORT - DDEV_SITENAME - DDEV_TLD - DOCKER_IP={{ .DockerIP }} - HOST_DOCKER_INTERNAL_IP={{ .HostDockerInternalIP }} - IS_DDEV_PROJECT=true - LINES - MYSQL_HISTFILE=/mnt/ddev-global-cache/mysqlhistory/${DDEV_SITENAME}-db/mysql_history - TZ={{ .Timezone }} command: "$DDEV_MARIADB_LOCAL_COMMAND" healthcheck: interval: 1s retries: 120 start_period: 120s timeout: 120s {{end}} web: container_name: {{ .Plugin }}-${DDEV_SITENAME}-web build: context: '{{ .WebBuildContext }}' dockerfile: '{{ .WebBuildDockerfile }}' args: BASE_IMAGE: $DDEV_WEBIMAGE username: '{{ .Username }}' uid: '{{ .UID }}' gid: '{{ .GID }}' image: ${DDEV_WEBIMAGE}-${DDEV_SITENAME}-built cap_add: - SYS_PTRACE volumes: {{ if not .NoProjectMount }} - type: {{ .MountType }} source: {{ .WebMount }} target: /var/www/html {{ if eq .MountType "volume" }} volume: nocopy: true {{ else }} consistency: cached {{ end }} {{ end }} - ".:/mnt/ddev_config:ro" - "./nginx_full:/etc/nginx/sites-enabled:ro" - "./apache:/etc/apache2/sites-enabled:ro" - ddev-global-cache:/mnt/ddev-global-cache {{ if not .OmitSSHAgent }} - ddev-ssh-agent_socket_dir:/home/.ssh-agent {{ end }} restart: "{{ if .AutoRestartContainers }}always{{ else }}no{{ end }}" user: "$DDEV_UID:$DDEV_GID" hostname: {{ .Name }}-web {{if not .OmitDB }} links: - db:db {{end}} # ports is list of exposed *container* ports ports: - "{{ .DockerIP }}:$DDEV_HOST_WEBSERVER_PORT:80" - "{{ .DockerIP }}:$DDEV_HOST_HTTPS_PORT:443" environment: - COLUMNS - DOCROOT=${DDEV_DOCROOT} - DDEV_DOCROOT - DDEV_HOSTNAME - DDEV_PHP_VERSION - DDEV_PRIMARY_URL - DDEV_PROJECT - DDEV_PROJECT_TYPE - DDEV_ROUTER_HTTP_PORT - DDEV_ROUTER_HTTPS_PORT - DDEV_SITENAME - DDEV_TLD - DDEV_FILES_DIR - DDEV_WEBSERVER_TYPE - DDEV_XDEBUG_ENABLED - DEPLOY_NAME=local {{ if not .DisableSettingsManagement }} - DRUSH_OPTIONS_URI=$DDEV_PRIMARY_URL {{ end }} - DRUSH_ALLOW_XDEBUG=1 - DOCKER_IP={{ .DockerIP }} - HOST_DOCKER_INTERNAL_IP={{ .HostDockerInternalIP }} # HTTP_EXPOSE allows for ports accepting HTTP traffic to be accessible from <site>.ddev.site:<port> # To expose a container port to a different host port, define the port as hostPort:containerPort - HTTP_EXPOSE=${DDEV_ROUTER_HTTP_PORT}:80,${DDEV_MAILHOG_PORT}:{{ .MailhogPort }} # You can optionally expose an HTTPS port option for any ports defined in HTTP_EXPOSE. 
# To expose an HTTPS port, define the port as securePort:containerPort. - HTTPS_EXPOSE=${DDEV_ROUTER_HTTPS_PORT}:80,${DDEV_MAILHOG_HTTPS_PORT}:{{ .MailhogPort }} - IS_DDEV_PROJECT=true - LINES - MYSQL_HISTFILE=/mnt/ddev-global-cache/mysqlhistory/${DDEV_SITENAME}-web/mysql_history - PHP_IDE_CONFIG=serverName=${DDEV_SITENAME}.${DDEV_TLD} - SSH_AUTH_SOCK=/home/.ssh-agent/socket - TZ={{ .Timezone }} - VIRTUAL_HOST=${DDEV_HOSTNAME} {{ range $env := .WebEnvironment }}- "{{ $env }}" {{ end }} labels: com.ddev.site-name: ${DDEV_SITENAME} com.ddev.platform: {{ .Plugin }} com.ddev.app-type: {{ .AppType }} com.ddev.approot: $DDEV_APPROOT {{ if .HostDockerInternalIP }} extra_hosts: [ "host.docker.internal:{{ .HostDockerInternalIP }}" ] {{ end }} external_links: {{ range $hostname := .Hostnames }}- "ddev-router:{{ $hostname }}" {{ end }} healthcheck: interval: 1s retries: 120 start_period: 120s timeout: 120s {{ if not .OmitDBA }} dba: container_name: ddev-${DDEV_SITENAME}-dba image: $DDEV_DBAIMAGE restart: "{{ if .AutoRestartContainers }}always{{ else }}no{{ end }}" labels: com.ddev.site-name: ${DDEV_SITENAME} com.ddev.platform: {{ .Plugin }} com.ddev.app-type: {{ .AppType }} com.ddev.approot: $DDEV_APPROOT links: - db:db expose: - "80" hostname: {{ .Name }}-dba environment: - PMA_USER=root - PMA_PASSWORD=root - VIRTUAL_HOST=$DDEV_HOSTNAME - UPLOAD_LIMIT=1024M - TZ={{ .Timezone }} # HTTP_EXPOSE allows for ports accepting HTTP traffic to be accessible from <site>.ddev.site:<port> - HTTP_EXPOSE=${DDEV_PHPMYADMIN_PORT}:{{ .DBAPort }} - HTTPS_EXPOSE=${DDEV_PHPMYADMIN_HTTPS_PORT}:{{ .DBAPort }} healthcheck: interval: 120s timeout: 2s retries: 1 {{end}} networks: default: name: ddev_default external: true volumes: {{if not .OmitDB }} mariadb-database: name: "${DDEV_SITENAME}-mariadb" {{end}} {{ if not .OmitSSHAgent }} ddev-ssh-agent_socket_dir: external: true {{ end }} ddev-global-cache: name: ddev-global-cache {{ if and .NFSMountEnabled (not .NoProjectMount) }} nfsmount: driver: local driver_opts: type: nfs o: "addr={{ if .HostDockerInternalIP }}{{ .HostDockerInternalIP }}{{ else }}host.docker.internal{{end}},hard,nolock,rw" device: ':{{ .NFSSource }}' {{ end }} {{ if and .MutagenEnabled (not .NoProjectMount) }} project_mutagen: name: {{ .MutagenVolume }} {{ end }} ` // ConfigInstructions is used to add example hooks usage const ConfigInstructions = ` # Key features of ddev's config.yaml: # name: <projectname> # Name of the project, automatically provides # http://projectname.ddev.site and https://projectname.ddev.site # type: <projecttype> # drupal6/7/8, backdrop, typo3, wordpress, php # docroot: <relative_path> # Relative path to the directory containing index.php. # php_version: "7.4" # PHP version to use, "5.6", "7.0", "7.1", "7.2", "7.3", "7.4" "8.0" # You can explicitly specify the webimage, dbimage, dbaimage lines but this # is not recommended, as the images are often closely tied to ddev's' behavior, # so this can break upgrades. # webimage: <docker_image> # nginx/php docker image. # dbimage: <docker_image> # mariadb docker image. 
# dbaimage: <docker_image> # mariadb_version and mysql_version # ddev can use many versions of mariadb and mysql # However these directives are mutually exclusive # mariadb_version: 10.2 # mysql_version: 8.0 # router_http_port: <port> # Port to be used for http (defaults to port 80) # router_https_port: <port> # Port for https (defaults to 443) # xdebug_enabled: false # Set to true to enable xdebug and "ddev start" or "ddev restart" # Note that for most people the commands # "ddev xdebug" to enable xdebug and "ddev xdebug off" to disable it work better, # as leaving xdebug enabled all the time is a big performance hit. # xhprof_enabled: false # Set to true to enable xhprof and "ddev start" or "ddev restart" # Note that for most people the commands # "ddev xhprof" to enable xhprof and "ddev xhprof off" to disable it work better, # as leaving xhprof enabled all the time is a big performance hit. # webserver_type: nginx-fpm # or apache-fpm # timezone: Europe/Berlin # This is the timezone used in the containers and by PHP; # it can be set to any valid timezone, # see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones # For example Europe/Dublin or MST7MDT # composer_version: "" # if composer_version:"" it will use the current ddev default composer release. # It can also be set to "1", to get most recent composer v1 # or "2" for most recent composer v2. # It can be set to any existing specific composer version. # After first project 'ddev start' this will not be updated until it changes # additional_hostnames: # - somename # - someothername # would provide http and https URLs for "somename.ddev.site" # and "someothername.ddev.site". # additional_fqdns: # - example.com # - sub1.example.com # would provide http and https URLs for "example.com" and "sub1.example.com" # Please take care with this because it can cause great confusion. # upload_dir: custom/upload/dir # would set the destination path for ddev import-files to custom/upload/dir. # working_dir: # web: /var/www/html # db: /home # would set the default working directory for the web and db services. # These values specify the destination directory for ddev ssh and the # directory in which commands passed into ddev exec are run. # omit_containers: [db, dba, ddev-ssh-agent] # Currently only these containers are supported. Some containers can also be # omitted globally in the ~/.ddev/global_config.yaml. Note that if you omit # the "db" container, several standard features of ddev that access the # database container will be unusable. # nfs_mount_enabled: false # Great performance improvement but requires host configuration first. # See https://ddev.readthedocs.io/en/stable/users/performance/#using-nfs-to-mount-the-project-into-the-container # mutagen_enabled: false # Experimental performance improvement using mutagen asynchronous updates. # See https://ddev.readthedocs.io/en/latest/users/performance/#using-mutagen # fail_on_hook_fail: False # Decide whether 'ddev start' should be interrupted by a failing hook # host_https_port: "59002" # The host port binding for https can be explicitly specified. It is # dynamic unless otherwise specified. # This is not used by most people, most people use the *router* instead # of the localhost port. # host_webserver_port: "59001" # The host port binding for the ddev-webserver can be explicitly specified. It is # dynamic unless otherwise specified. # This is not used by most people, most people use the *router* instead # of the localhost port. 
# host_db_port: "59002" # The host port binding for the ddev-dbserver can be explicitly specified. It is dynamic # unless explicitly specified. # phpmyadmin_port: "8036" # phpmyadmin_https_port: "8037" # The PHPMyAdmin ports can be changed from the default 8036 and 8037 # mailhog_port: "8025" # mailhog_https_port: "8026" # The MailHog ports can be changed from the default 8025 and 8026 # webimage_extra_packages: [php7.4-tidy, php-bcmath] # Extra Debian packages that are needed in the webimage can be added here # dbimage_extra_packages: [telnet,netcat] # Extra Debian packages that are needed in the dbimage can be added here # use_dns_when_possible: true # If the host has internet access and the domain configured can # successfully be looked up, DNS will be used for hostname resolution # instead of editing /etc/hosts # Defaults to true # project_tld: ddev.site # The top-level domain used for project URLs # The default "ddev.site" allows DNS lookup via a wildcard # If you prefer you can change this to "ddev.local" to preserve # pre-v1.9 behavior. # ngrok_args: --subdomain mysite --auth username:pass # Provide extra flags to the "ngrok http" command, see # https://ngrok.com/docs#http or run "ngrok http -h" # disable_settings_management: false # If true, ddev will not create CMS-specific settings files like # Drupal's settings.php/settings.ddev.php or TYPO3's AdditionalConfiguration.php # In this case the user must provide all such settings. # You can inject environment variables into the web container with: # web_environment: # - SOMEENV=somevalue # - SOMEOTHERENV=someothervalue # no_project_mount: false # (Experimental) If true, ddev will not mount the project into the web container; # the user is responsible for mounting it manually or via a script. # This is to enable experimentation with alternate file mounting strategies. # For advanced users only! # Many ddev commands can be extended to run tasks before or after the # ddev command is executed, for example "post-start", "post-import-db", # "pre-composer", "post-composer" # See https://ddev.readthedocs.io/en/stable/users/extending-commands/ for more # information on the commands that can be extended and the tasks you can define # for them. Example: #hooks: ` // SequelproTemplate is the template for Sequelpro config. 
var SequelproTemplate = `<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>ContentFilters</key> <dict/> <key>auto_connect</key> <true/> <key>data</key> <dict> <key>connection</key> <dict> <key>database</key> <string>%s</string> <key>host</key> <string>%s</string> <key>name</key> <string>drud/%s</string> <key>password</key> <string>%s</string> <key>port</key> <integer>%s</integer> <key>rdbms_type</key> <string>mysql</string> <key>sslCACertFileLocation</key> <string></string> <key>sslCACertFileLocationEnabled</key> <integer>0</integer> <key>sslCertificateFileLocation</key> <string></string> <key>sslCertificateFileLocationEnabled</key> <integer>0</integer> <key>sslKeyFileLocation</key> <string></string> <key>sslKeyFileLocationEnabled</key> <integer>0</integer> <key>type</key> <string>SPTCPIPConnection</string> <key>useSSL</key> <integer>0</integer> <key>user</key> <string>%s</string> </dict> </dict> <key>encrypted</key> <false/> <key>format</key> <string>connection</string> <key>queryFavorites</key> <array/> <key>queryHistory</key> <array/> <key>rdbms_type</key> <string>mysql</string> <key>rdbms_version</key> <string>5.5.44</string> <key>version</key> <integer>1</integer> </dict> </plist>` // DdevRouterTemplate is the template for the generic router container. const DdevRouterTemplate = `version: '{{ .compose_version }}' services: ddev-router: image: {{ .router_image }}:{{ .router_tag }} container_name: ddev-router ports:{{ $dockerIP := .dockerIP }}{{ if not .router_bind_all_interfaces }}{{ range $port := .ports }} - "{{ $dockerIP }}:{{ $port }}:{{ $port }}"{{ end }}{{ else }}{{ range $port := .ports }} - "{{ $port }}:{{ $port }}"{{ end }}{{ end }} volumes: - /var/run/docker.sock:/tmp/docker.sock:ro - ddev-global-cache:/mnt/ddev-global-cache:rw {{ if .letsencrypt }} - ddev-router-letsencrypt:/etc/letsencrypt:rw {{ end }} environment: - DISABLE_HTTP2={{ .disable_http2 }} {{ if .letsencrypt }} - LETSENCRYPT_EMAIL={{ .letsencrypt_email }} - USE_LETSENCRYPT={{ .letsencrypt }} {{ end }} restart: "{{ if .AutoRestartContainers }}always{{ else }}no{{ end }}" healthcheck: interval: 1s retries: 120 start_period: 120s timeout: 120s networks: default: name: ddev_default external: true volumes: ddev-global-cache: name: ddev-global-cache {{ if .letsencrypt }} ddev-router-letsencrypt: name: ddev-router-letsencrypt {{ end }} ` const DdevSSHAuthTemplate = `version: '{{ .compose_version }}' volumes: dot_ssh: socket_dir: name: ddev-ssh-agent_socket_dir services: ddev-ssh-agent: container_name: ddev-ssh-agent hostname: ddev-ssh-agent build: context: '{{ .BuildContext }}' args: BASE_IMAGE: {{ .ssh_auth_image }}:{{ .ssh_auth_tag }} username: '{{ .Username }}' uid: '{{ .UID }}' gid: '{{ .GID }}' image: {{ .ssh_auth_image }}:{{ .ssh_auth_tag }}-built restart: "{{ if .AutoRestartContainers }}always{{ else }}no{{ end }}" user: "$DDEV_UID:$DDEV_GID" volumes: - "dot_ssh:/tmp/.ssh" - "socket_dir:/tmp/.ssh-agent" environment: - SSH_AUTH_SOCK=/tmp/.ssh-agent/socket healthcheck: interval: 1s retries: 2 start_period: 10s timeout: 62s networks: default: name: ddev_default external: true `
1
15,623
Should this be DBWorkingDir?
drud-ddev
go
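The review question above is whether the `working_dir` added to the `db` service in the compose template should reference a `DBWorkingDir` field rather than `WebWorkingDir`. If the template data exposes such a field (an assumption here; only the reviewer's question names it), the relevant part of the template would presumably read:

```yaml
  db:
    # ...
    working_dir: "{{ .DBWorkingDir }}"   # field name taken from the reviewer's question, not confirmed by the patch
```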
@@ -542,7 +542,7 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission String eventTypeStr = op.getStr(EVENT); if (op.hasError()) return currentConfig; - TriggerEventType eventType = TriggerEventType.valueOf(eventTypeStr.trim().toUpperCase(Locale.ROOT)); + TriggerEventType.valueOf(eventTypeStr.trim().toUpperCase(Locale.ROOT)); String waitForStr = op.getStr(WAIT_FOR, null);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.cloud.autoscaling; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.solr.api.Api; import org.apache.solr.api.ApiBag; import org.apache.solr.client.solrj.cloud.SolrCloudManager; import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig; import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException; import org.apache.solr.client.solrj.cloud.autoscaling.Clause; import org.apache.solr.client.solrj.cloud.autoscaling.Policy; import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper; import org.apache.solr.client.solrj.cloud.autoscaling.Preference; import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage; import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType; import org.apache.solr.common.MapWriter; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.AutoScalingParams; import org.apache.solr.common.params.CollectionAdminParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.CommandOperation; import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.StrUtils; import org.apache.solr.common.util.TimeSource; import org.apache.solr.common.util.Utils; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.handler.RequestHandlerUtils; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.security.AuthorizationContext; import org.apache.solr.security.PermissionNameProvider; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.util.stream.Collectors.collectingAndThen; import static java.util.stream.Collectors.toSet; import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH; import static org.apache.solr.common.params.AutoScalingParams.*; import static org.apache.solr.common.params.CommonParams.JSON; /** * Handler for /cluster/autoscaling */ public class AutoScalingHandler extends RequestHandlerBase implements PermissionNameProvider { public static final String HANDLER_PATH = "/admin/autoscaling"; 
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); protected final SolrCloudManager cloudManager; protected final SolrResourceLoader loader; protected final AutoScaling.TriggerFactory triggerFactory; private final List<Map<String, String>> DEFAULT_ACTIONS = new ArrayList<>(3); private static Set<String> singletonCommands = Stream.of("set-cluster-preferences", "set-cluster-policy") .collect(collectingAndThen(toSet(), Collections::unmodifiableSet)); private final TimeSource timeSource; public AutoScalingHandler(SolrCloudManager cloudManager, SolrResourceLoader loader) { this.cloudManager = cloudManager; this.loader = loader; this.triggerFactory = new AutoScaling.TriggerFactoryImpl(loader, cloudManager); this.timeSource = cloudManager.getTimeSource(); Map<String, String> map = new HashMap<>(2); map.put(NAME, "compute_plan"); map.put(CLASS, "solr.ComputePlanAction"); DEFAULT_ACTIONS.add(map); map = new HashMap<>(2); map.put(NAME, "execute_plan"); map.put(CLASS, "solr.ExecutePlanAction"); DEFAULT_ACTIONS.add(map); } Optional<BiConsumer<SolrQueryResponse, AutoScalingConfig>> getSubpathExecutor(List<String> path, SolrQueryRequest request) { if (path.size() == 3) { if (DIAGNOSTICS.equals(path.get(2))) { return Optional.of((rsp, autoScalingConf) -> handleDiagnostics(rsp, autoScalingConf)); } else if (SUGGESTIONS.equals(path.get(2))) { return Optional.of((rsp, autoScalingConf) -> handleSuggestions(rsp, autoScalingConf, request.getParams())); } else { return Optional.empty(); } } return Optional.empty(); } @Override @SuppressWarnings({"unchecked", "rawtypes"}) public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { try { String httpMethod = (String) req.getContext().get("httpMethod"); RequestHandlerUtils.setWt(req, JSON); if ("GET".equals(httpMethod)) { String path = (String) req.getContext().get("path"); if (path == null) path = "/cluster/autoscaling"; List<String> parts = StrUtils.splitSmart(path, '/', true); if (parts.size() < 2 || parts.size() > 3) { // invalid throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown path: " + path); } AutoScalingConfig autoScalingConf = cloudManager.getDistribStateManager().getAutoScalingConfig(); if (parts.size() == 2) { autoScalingConf.writeMap(new MapWriter.EntryWriter() { @Override public MapWriter.EntryWriter put(CharSequence k, Object v) { rsp.getValues().add(k.toString(), v); return this; } }); } else { getSubpathExecutor(parts, req).ifPresent(it -> it.accept(rsp, autoScalingConf)); } } else { if (req.getContentStreams() == null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No commands specified for autoscaling"); } String path = (String) req.getContext().get("path"); if (path != null) { List<String> parts = StrUtils.splitSmart(path, '/', true); if(parts.size() == 3){ getSubpathExecutor(parts, req).ifPresent(it -> { Map map = null; try { map = (Map) Utils.fromJSON(req.getContentStreams().iterator().next().getStream()); } catch (IOException e1) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "error parsing payload", e1); } it.accept(rsp, new AutoScalingConfig(map)); }); return; } } List<CommandOperation> ops = CommandOperation.readCommands(req.getContentStreams(), rsp.getValues(), singletonCommands); if (ops == null) { // errors have already been added to the response so there's nothing left to do return; } processOps(req, rsp, ops); } } catch (Exception e) { rsp.getValues().add("result", "failure"); throw e; } finally { 
RequestHandlerUtils.addExperimentalFormatWarning(rsp); } } @SuppressWarnings({"unchecked"}) private void handleSuggestions(SolrQueryResponse rsp, AutoScalingConfig autoScalingConf, SolrParams params) { rsp.getValues().add("suggestions", PolicyHelper.getSuggestions(autoScalingConf, cloudManager, params)); } @SuppressWarnings({"unchecked", "rawtypes"}) public void processOps(SolrQueryRequest req, SolrQueryResponse rsp, List<CommandOperation> ops) throws KeeperException, InterruptedException, IOException { while (true) { AutoScalingConfig initialConfig = cloudManager.getDistribStateManager().getAutoScalingConfig(); AutoScalingConfig currentConfig = initialConfig; for (CommandOperation op : ops) { switch (op.name) { case CMD_SET_TRIGGER: currentConfig = handleSetTrigger(req, rsp, op, currentConfig); break; case CMD_REMOVE_TRIGGER: currentConfig = handleRemoveTrigger(req, rsp, op, currentConfig); break; case CMD_SET_LISTENER: currentConfig = handleSetListener(req, rsp, op, currentConfig); break; case CMD_REMOVE_LISTENER: currentConfig = handleRemoveListener(req, rsp, op, currentConfig); break; case CMD_SUSPEND_TRIGGER: currentConfig = handleSuspendTrigger(req, rsp, op, currentConfig); break; case CMD_RESUME_TRIGGER: currentConfig = handleResumeTrigger(req, rsp, op, currentConfig); break; case CMD_SET_POLICY: currentConfig = handleSetPolicies(req, rsp, op, currentConfig); break; case CMD_REMOVE_POLICY: currentConfig = handleRemovePolicy(req, rsp, op, currentConfig); break; case CMD_SET_CLUSTER_PREFERENCES: currentConfig = handleSetClusterPreferences(req, rsp, op, currentConfig); break; case CMD_SET_CLUSTER_POLICY: currentConfig = handleSetClusterPolicy(req, rsp, op, currentConfig); break; case CMD_SET_PROPERTIES: currentConfig = handleSetProperties(req, rsp, op, currentConfig); break; default: op.addError("Unknown command: " + op.name); } } List errs = CommandOperation.captureErrors(ops); if (!errs.isEmpty()) { throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error in command payload", errs); } if (!currentConfig.equals(initialConfig)) { // update in ZK if (setAutoScalingConfig(currentConfig)) { break; } else { // someone else updated the config, get the latest one and re-apply our ops rsp.getValues().add("retry", "initialVersion=" + initialConfig.getZkVersion()); continue; } } else { // no changes break; } } rsp.getValues().add("result", "success"); } private AutoScalingConfig handleSetProperties(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) { Map<String, Object> map = op.getDataMap() == null ? 
Collections.emptyMap() : op.getDataMap(); Map<String, Object> configProps = new HashMap<>(currentConfig.getProperties()); configProps.putAll(map); // remove a key which is set to null map.forEach((k, v) -> { if (v == null) configProps.remove(k); }); return currentConfig.withProperties(configProps); } @SuppressWarnings({"unchecked"}) private void handleDiagnostics(SolrQueryResponse rsp, AutoScalingConfig autoScalingConf) { Policy policy = autoScalingConf.getPolicy(); rsp.getValues().add("diagnostics", PolicyHelper.getDiagnostics(policy, cloudManager)); } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleSetClusterPolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException { List<Map<String, Object>> clusterPolicy = (List<Map<String, Object>>) op.getCommandData(); if (clusterPolicy == null || !(clusterPolicy instanceof List)) { op.addError("set-cluster-policy expects an array of objects"); return currentConfig; } List<Clause> cp = null; try { cp = clusterPolicy.stream().map(Clause::create).collect(Collectors.toList()); } catch (Exception e) { op.addError(e.getMessage()); return currentConfig; } Policy p = currentConfig.getPolicy().withClusterPolicy(cp); currentConfig = currentConfig.withPolicy(p); return currentConfig; } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleSetClusterPreferences(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException { List<Map<String, Object>> preferences = (List<Map<String, Object>>) op.getCommandData(); if (preferences == null || !(preferences instanceof List)) { op.addError("A list of cluster preferences not found"); return currentConfig; } List<Preference> prefs = null; try { prefs = preferences.stream().map(Preference::new).collect(Collectors.toList()); } catch (Exception e) { op.addError(e.getMessage()); return currentConfig; } Policy p = currentConfig.getPolicy().withClusterPreferences(prefs); currentConfig = currentConfig.withPolicy(p); return currentConfig; } private AutoScalingConfig handleRemovePolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException { String policyName = (String) op.getVal(""); if (op.hasError()) return currentConfig; Map<String, List<Clause>> policies = currentConfig.getPolicy().getPolicies(); if (policies == null || !policies.containsKey(policyName)) { op.addError("No policy exists with name: " + policyName); return currentConfig; } cloudManager.getClusterStateProvider().getClusterState().forEachCollection(coll -> { if (policyName.equals(coll.getPolicyName())) op.addError(StrUtils.formatString("policy : {0} is being used by collection {1}", policyName, coll.getName())); }); if (op.hasError()) return currentConfig; policies = new HashMap<>(policies); policies.remove(policyName); Policy p = currentConfig.getPolicy().withPolicies(policies); currentConfig = currentConfig.withPolicy(p); return currentConfig; } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleSetPolicies(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException { Map<String, Object> policiesMap = op.getDataMap(); for (Map.Entry<String, Object> policy : policiesMap.entrySet()) { String policyName = policy.getKey(); if 
(policyName == null || policyName.trim().length() == 0) { op.addError("The policy name cannot be null or empty"); return currentConfig; } } Map<String, List<Clause>> currentClauses = new HashMap<>(currentConfig.getPolicy().getPolicies()); Map<String, List<Clause>> newClauses = null; try { newClauses = Policy.clausesFromMap((Map<String, List<Map<String, Object>>>) op.getCommandData(), new ArrayList<>() ); } catch (Exception e) { op.addError(e.getMessage()); return currentConfig; } currentClauses.putAll(newClauses); Policy p = currentConfig.getPolicy().withPolicies(currentClauses); currentConfig = currentConfig.withPolicy(p); return currentConfig; } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleResumeTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { String triggerName = op.getStr(NAME); if (op.hasError()) return currentConfig; Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs(); Set<String> changed = new HashSet<>(); if (!Policy.EACH.equals(triggerName) && !triggers.containsKey(triggerName)) { op.addError("No trigger exists with name: " + triggerName); return currentConfig; } Map<String, AutoScalingConfig.TriggerConfig> newTriggers = new HashMap<>(); for (Map.Entry<String, AutoScalingConfig.TriggerConfig> entry : triggers.entrySet()) { if (Policy.EACH.equals(triggerName) || triggerName.equals(entry.getKey())) { AutoScalingConfig.TriggerConfig trigger = entry.getValue(); if (!trigger.enabled) { trigger = trigger.withEnabled(true); newTriggers.put(entry.getKey(), trigger); changed.add(entry.getKey()); } else { newTriggers.put(entry.getKey(), entry.getValue()); } } else { newTriggers.put(entry.getKey(), entry.getValue()); } } rsp.getValues().add("changed", changed); if (!changed.isEmpty()) { currentConfig = currentConfig.withTriggerConfigs(newTriggers); } return currentConfig; } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleSuspendTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { String triggerName = op.getStr(NAME); if (op.hasError()) return currentConfig; String timeout = op.getStr(TIMEOUT, null); Date resumeTime = null; if (timeout != null) { try { int timeoutSeconds = parseHumanTime(timeout); resumeTime = new Date(TimeUnit.MILLISECONDS.convert(timeSource.getTimeNs(), TimeUnit.NANOSECONDS) + TimeUnit.MILLISECONDS.convert(timeoutSeconds, TimeUnit.SECONDS)); } catch (IllegalArgumentException e) { op.addError("Invalid 'timeout' value for suspend trigger: " + triggerName); return currentConfig; } } Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs(); Set<String> changed = new HashSet<>(); if (!Policy.EACH.equals(triggerName) && !triggers.containsKey(triggerName)) { op.addError("No trigger exists with name: " + triggerName); return currentConfig; } Map<String, AutoScalingConfig.TriggerConfig> newTriggers = new HashMap<>(); for (Map.Entry<String, AutoScalingConfig.TriggerConfig> entry : triggers.entrySet()) { if (Policy.EACH.equals(triggerName) || triggerName.equals(entry.getKey())) { AutoScalingConfig.TriggerConfig trigger = entry.getValue(); if (trigger.enabled) { trigger = trigger.withEnabled(false); if (resumeTime != null) { trigger = trigger.withProperty(RESUME_AT, resumeTime.getTime()); } newTriggers.put(entry.getKey(), trigger); changed.add(trigger.name); } else { 
newTriggers.put(entry.getKey(), entry.getValue()); } } else { newTriggers.put(entry.getKey(), entry.getValue()); } } rsp.getValues().add("changed", changed); if (!changed.isEmpty()) { currentConfig = currentConfig.withTriggerConfigs(newTriggers); } return currentConfig; } private AutoScalingConfig handleRemoveListener(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { String listenerName = op.getStr(NAME); if (op.hasError()) return currentConfig; Map<String, AutoScalingConfig.TriggerListenerConfig> listeners = currentConfig.getTriggerListenerConfigs(); if (listeners == null || !listeners.containsKey(listenerName)) { op.addError("No listener exists with name: " + listenerName); return currentConfig; } currentConfig = currentConfig.withoutTriggerListenerConfig(listenerName); return currentConfig; } private AutoScalingConfig handleSetListener(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { String listenerName = op.getStr(NAME); String triggerName = op.getStr(TRIGGER); List<String> stageNames = op.getStrs(STAGE, Collections.emptyList()); String listenerClass = op.getStr(CLASS); List<String> beforeActions = op.getStrs(BEFORE_ACTION, Collections.emptyList()); List<String> afterActions = op.getStrs(AFTER_ACTION, Collections.emptyList()); if (op.hasError()) return currentConfig; Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs(); if (triggers == null || !triggers.containsKey(triggerName)) { op.addError("A trigger with the name " + triggerName + " does not exist"); return currentConfig; } AutoScalingConfig.TriggerConfig triggerConfig = triggers.get(triggerName); if (stageNames.isEmpty() && beforeActions.isEmpty() && afterActions.isEmpty()) { op.addError("Either 'stage' or 'beforeAction' or 'afterAction' must be specified"); return currentConfig; } for (String stage : stageNames) { try { TriggerEventProcessorStage.valueOf(stage); } catch (IllegalArgumentException e) { op.addError("Invalid stage name: " + stage); } } if (op.hasError()) return currentConfig; AutoScalingConfig.TriggerListenerConfig listenerConfig = new AutoScalingConfig.TriggerListenerConfig(listenerName, op.getValuesExcluding("name")); // validate that we can load the listener class // todo allow creation from blobstore TriggerListener listener = null; try { listener = loader.newInstance(listenerClass, TriggerListener.class); listener.configure(loader, cloudManager, listenerConfig); } catch (TriggerValidationException e) { log.warn("invalid listener configuration", e); op.addError("invalid listener configuration: " + e.toString()); return currentConfig; } catch (Exception e) { log.warn("error loading listener class ", e); op.addError("Listener not found: " + listenerClass + ". 
error message:" + e.getMessage()); return currentConfig; } finally { if (listener != null) { IOUtils.closeQuietly(listener); } } Set<String> actionNames = new HashSet<>(); actionNames.addAll(beforeActions); actionNames.addAll(afterActions); for (AutoScalingConfig.ActionConfig action : triggerConfig.actions) { actionNames.remove(action.name); } if (!actionNames.isEmpty()) { op.addError("The trigger '" + triggerName + "' does not have actions named: " + actionNames); return currentConfig; } // todo - handle races between competing set-trigger and set-listener invocations currentConfig = currentConfig.withTriggerListenerConfig(listenerConfig); return currentConfig; } @SuppressWarnings({"unchecked"}) private AutoScalingConfig handleSetTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { // we're going to modify the op - use a copy String triggerName = op.getStr(NAME); String eventTypeStr = op.getStr(EVENT); if (op.hasError()) return currentConfig; TriggerEventType eventType = TriggerEventType.valueOf(eventTypeStr.trim().toUpperCase(Locale.ROOT)); String waitForStr = op.getStr(WAIT_FOR, null); CommandOperation opCopy = new CommandOperation(op.name, Utils.getDeepCopy((Map) op.getCommandData(), 10)); if (waitForStr != null) { int seconds = 0; try { seconds = parseHumanTime(waitForStr); } catch (IllegalArgumentException e) { op.addError("Invalid 'waitFor' value '" + waitForStr + "' in trigger: " + triggerName); return currentConfig; } opCopy.getDataMap().put(WAIT_FOR, seconds); } Integer lowerBound = op.getInt(LOWER_BOUND, null); Integer upperBound = op.getInt(UPPER_BOUND, null); List<Map<String, String>> actions = (List<Map<String, String>>) op.getVal(ACTIONS); if (actions == null) { actions = DEFAULT_ACTIONS; opCopy.getDataMap().put(ACTIONS, actions); } // validate that we can load all the actions // todo allow creation from blobstore for (Map<String, String> action : actions) { if (!action.containsKey(NAME) || !action.containsKey(CLASS)) { op.addError("No 'name' or 'class' specified for action: " + action); return currentConfig; } String klass = action.get(CLASS); try { loader.findClass(klass, TriggerAction.class); } catch (Exception e) { log.warn("Could not load class : ", e); op.addError("Action not found: " + klass + " " + e.getMessage()); return currentConfig; } } AutoScalingConfig.TriggerConfig trigger = new AutoScalingConfig.TriggerConfig(triggerName, opCopy.getValuesExcluding("name")); // validate trigger config AutoScaling.Trigger t = null; try { t = triggerFactory.create(trigger.event, trigger.name, trigger.properties); } catch (Exception e) { op.addError("Error validating trigger config " + trigger.name + ": " + e.toString()); return currentConfig; } finally { if (t != null) { IOUtils.closeQuietly(t); } } currentConfig = currentConfig.withTriggerConfig(trigger); // check that there's a default SystemLogListener, unless user specified another one return withSystemLogListener(currentConfig, triggerName); } private static String fullName = SystemLogListener.class.getName(); private static String solrName = "solr." 
+ SystemLogListener.class.getSimpleName(); public static AutoScalingConfig withSystemLogListener(AutoScalingConfig autoScalingConfig, String triggerName) { Map<String, AutoScalingConfig.TriggerListenerConfig> configs = autoScalingConfig.getTriggerListenerConfigs(); for (AutoScalingConfig.TriggerListenerConfig cfg : configs.values()) { if (triggerName.equals(cfg.trigger)) { // already has some listener config return autoScalingConfig; } } // need to add Map<String, Object> properties = new HashMap<>(); properties.put(AutoScalingParams.CLASS, SystemLogListener.class.getName()); properties.put(AutoScalingParams.TRIGGER, triggerName); properties.put(AutoScalingParams.STAGE, EnumSet.allOf(TriggerEventProcessorStage.class)); AutoScalingConfig.TriggerListenerConfig listener = new AutoScalingConfig.TriggerListenerConfig(triggerName + CollectionAdminParams.SYSTEM_COLL, properties); autoScalingConfig = autoScalingConfig.withTriggerListenerConfig(listener); return autoScalingConfig; } private int parseHumanTime(String timeStr) { char c = timeStr.charAt(timeStr.length() - 1); long timeValue = Long.parseLong(timeStr.substring(0, timeStr.length() - 1)); int seconds; switch (c) { case 'h': seconds = (int) TimeUnit.HOURS.toSeconds(timeValue); break; case 'm': seconds = (int) TimeUnit.MINUTES.toSeconds(timeValue); break; case 's': seconds = (int) timeValue; break; default: throw new IllegalArgumentException("Invalid time value"); } return seconds; } private AutoScalingConfig handleRemoveTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) throws KeeperException, InterruptedException { String triggerName = op.getStr(NAME); boolean removeListeners = op.getBoolean(REMOVE_LISTENERS, false); if (op.hasError()) return currentConfig; Map<String, AutoScalingConfig.TriggerConfig> triggerConfigs = currentConfig.getTriggerConfigs(); if (!triggerConfigs.containsKey(triggerName)) { op.addError("No trigger exists with name: " + triggerName); return currentConfig; } triggerConfigs = new HashMap<>(triggerConfigs); Set<String> activeListeners = new HashSet<>(); Map<String, AutoScalingConfig.TriggerListenerConfig> listeners = currentConfig.getTriggerListenerConfigs(); for (AutoScalingConfig.TriggerListenerConfig listener : listeners.values()) { if (triggerName.equals(listener.trigger)) { activeListeners.add(listener.name); } } if (!activeListeners.isEmpty()) { boolean onlySystemLog = false; if (activeListeners.size() == 1) { AutoScalingConfig.TriggerListenerConfig cfg = listeners.get(activeListeners.iterator().next()); if (SystemLogListener.class.getName().equals(cfg.listenerClass) || ("solr." 
+ SystemLogListener.class.getSimpleName()).equals(cfg.listenerClass)) { onlySystemLog = true; } } if (removeListeners || onlySystemLog) { listeners = new HashMap<>(listeners); listeners.keySet().removeAll(activeListeners); } else { op.addError("Cannot remove trigger: " + triggerName + " because it has active listeners: " + activeListeners); return currentConfig; } } triggerConfigs.remove(triggerName); currentConfig = currentConfig.withTriggerConfigs(triggerConfigs).withTriggerListenerConfigs(listeners); return currentConfig; } private boolean setAutoScalingConfig(AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException { verifyAutoScalingConf(currentConfig); try { cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(currentConfig), currentConfig.getZkVersion()); } catch (BadVersionException bve) { // somebody else has changed the configuration so we must retry return false; } //log.debug("-- saved version " + currentConfig.getZkVersion() + ": " + currentConfig); return true; } private void verifyAutoScalingConf(AutoScalingConfig autoScalingConf) throws IOException { Policy.Session session = autoScalingConf.getPolicy() .createSession(cloudManager); log.debug("Verified autoscaling configuration"); } @Override public String getDescription() { return "A handler for autoscaling configuration"; } @Override public Name getPermissionName(AuthorizationContext request) { switch (request.getHttpMethod()) { case "GET": return Name.AUTOSCALING_READ_PERM; case "POST": { return StrUtils.splitSmart(request.getResource(), '/', true).size() == 3 ? Name.AUTOSCALING_READ_PERM : Name.AUTOSCALING_WRITE_PERM; } default: return null; } } @Override public Collection<Api> getApis() { return ApiBag.wrapRequestHandlers(this, "autoscaling.Commands"); } @Override public Boolean registerV2() { return Boolean.TRUE; } @Override public SolrRequestHandler getSubHandler(String path) { if (path.equals("/diagnostics") || path.equals("/suggestions")) return this; return null; } }
1
34,920
I think you can remove the whole line.
apache-lucene-solr
java
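Editor's note on the record above: processOps() in AutoScalingHandler applies every command to a snapshot of the autoscaling config and persists it only if the ZooKeeper version is unchanged, retrying from a fresh snapshot when setAutoScalingConfig() hits a BadVersionException. What follows is a minimal, self-contained Java sketch of that optimistic compare-and-set loop; VersionedStore, Versioned and OptimisticUpdater are hypothetical stand-ins for Solr's DistribStateManager and AutoScalingConfig, not types from the project.

import java.util.List;
import java.util.function.UnaryOperator;

// Hypothetical versioned store; stands in for DistribStateManager plus the config's ZK version.
interface VersionedStore<T> {
    Versioned<T> read();                                  // current value plus its version
    boolean writeIfVersionMatches(T value, int version);  // false when another writer won the race
}

record Versioned<T>(T value, int version) {}

final class OptimisticUpdater {
    // Re-applies the edits to a fresh snapshot until the conditional write succeeds,
    // mirroring the retry loop in processOps().
    static <T> T applyWithRetry(VersionedStore<T> store, List<UnaryOperator<T>> edits) {
        while (true) {
            Versioned<T> initial = store.read();
            T updated = initial.value();
            for (UnaryOperator<T> edit : edits) {
                updated = edit.apply(updated);            // each edit returns a new config object
            }
            if (updated.equals(initial.value())) {
                return updated;                           // no changes, nothing to write
            }
            if (store.writeIfVersionMatches(updated, initial.version())) {
                return updated;                           // written against the version we read
            }
            // lost the race: somebody bumped the version, loop and re-apply against the new state
        }
    }
}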
@@ -8,6 +8,7 @@ package consensus
 
 import (
 	"context"
+	"github.com/iotexproject/iotex-core/db"
 	"math/big"
 
 	"github.com/facebookgo/clock"
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package consensus import ( "context" "math/big" "github.com/facebookgo/clock" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/consensus/scheme" "github.com/iotexproject/iotex-core/consensus/scheme/rolldpos" explorerapi "github.com/iotexproject/iotex-core/explorer/idl/explorer" "github.com/iotexproject/iotex-core/iotxaddress" "github.com/iotexproject/iotex-core/logger" "github.com/iotexproject/iotex-core/network" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/lifecycle" "github.com/iotexproject/iotex-core/proto" "github.com/iotexproject/iotex-core/state" ) // Consensus is the interface for handling IotxConsensus view change. type Consensus interface { lifecycle.StartStopper HandleBlockPropose(*iproto.ProposePb) error HandleEndorse(*iproto.EndorsePb) error Metrics() (scheme.ConsensusMetrics, error) } // IotxConsensus implements Consensus type IotxConsensus struct { cfg config.Consensus scheme scheme.Scheme } type optionParams struct { rootChainAPI explorerapi.Explorer } // Option sets Consensus construction parameter. type Option func(op *optionParams) error // WithRootChainAPI is an option to add a root chain api to Consensus. func WithRootChainAPI(exp explorerapi.Explorer) Option { return func(ops *optionParams) error { ops.rootChainAPI = exp return nil } } // NewConsensus creates a IotxConsensus struct. func NewConsensus( cfg config.Config, bc blockchain.Blockchain, ap actpool.ActPool, p2p network.Overlay, opts ...Option, ) Consensus { if bc == nil || ap == nil || p2p == nil { logger.Panic().Msg("Try to attach to nil blockchain, action pool or p2p interface") } var ops optionParams for _, opt := range opts { if err := opt(&ops); err != nil { return nil } } cs := &IotxConsensus{cfg: cfg.Consensus} mintBlockCB := func() (*blockchain.Block, error) { acts := ap.PickActs() logger.Debug(). Int("actions", len(acts)). Msg("pick actions") blk, err := bc.MintNewBlock(acts, GetAddr(cfg), nil, nil, "") if err != nil { logger.Error().Err(err).Msg("Failed to mint a block") return nil, err } logger.Info(). Uint64("height", blk.Height()). Int("length", len(blk.Actions)). Msg("created a new block") return blk, nil } commitBlockCB := func(blk *blockchain.Block) error { err := bc.CommitBlock(blk) if err != nil { logger.Error().Err(err).Int64("Height", int64(blk.Height())).Msg("Failed to commit the block") } // Remove transfers in this block from ActPool and reset ActPool state ap.Reset() return err } broadcastBlockCB := func(blk *blockchain.Block) error { if blkPb := blk.ConvertToBlockPb(); blkPb != nil { return p2p.Broadcast(bc.ChainID(), blkPb) } return nil } var err error clock := clock.New() switch cfg.Consensus.Scheme { case config.RollDPoSScheme: bd := rolldpos.NewRollDPoSBuilder(). SetAddr(GetAddr(cfg)). SetConfig(cfg.Consensus.RollDPoS). SetBlockchain(bc). SetActPool(ap). SetClock(clock). 
SetP2P(p2p) if ops.rootChainAPI != nil { bd = bd.SetCandidatesByHeightFunc(func(h uint64) ([]*state.Candidate, error) { rawcs, err := ops.rootChainAPI.GetCandidateMetricsByHeight(int64(h)) if err != nil { return nil, errors.Wrapf(err, "error when get root chain candidates at height %d", h) } cs := make([]*state.Candidate, 0, len(rawcs.Candidates)) for _, rawc := range rawcs.Candidates { // TODO: this is a short term walk around. We don't need to convert root chain address to sub chain // address. Instead we should use public key to identify the block producer rootChainAddr, err := address.IotxAddressToAddress(rawc.Address) if err != nil { return nil, errors.Wrapf(err, "error when get converting iotex address to address") } subChainAddr := address.New(cfg.Chain.ID, rootChainAddr.Payload()) pubKey, err := keypair.DecodePublicKey(rawc.PubKey) if err != nil { logger.Error().Err(err).Msg("error when convert candidate PublicKey") } votes, ok := big.NewInt(0).SetString(rawc.TotalVote, 10) if !ok { logger.Error().Err(err).Msg("error when setting candidate total votes") } cs = append(cs, &state.Candidate{ Address: subChainAddr.IotxAddress(), PublicKey: pubKey, Votes: votes, CreationHeight: uint64(rawc.CreationHeight), LastUpdateHeight: uint64(rawc.LastUpdateHeight), }) } return cs, nil }) bd = bd.SetRootChainAPI(ops.rootChainAPI) } cs.scheme, err = bd.Build() if err != nil { logger.Panic().Err(err).Msg("error when constructing RollDPoS") } case config.NOOPScheme: cs.scheme = scheme.NewNoop() case config.StandaloneScheme: cs.scheme = scheme.NewStandalone( mintBlockCB, commitBlockCB, broadcastBlockCB, bc, cfg.Consensus.BlockCreationInterval, ) default: logger.Error(). Str("scheme", cfg.Consensus.Scheme). Msg("Unexpected IotxConsensus scheme") return nil } return cs } // Start starts running the consensus algorithm func (c *IotxConsensus) Start(ctx context.Context) error { logger.Info(). Str("scheme", c.cfg.Scheme). Msg("Starting IotxConsensus scheme") err := c.scheme.Start(ctx) if err != nil { return errors.Wrapf(err, "failed to start scheme %s", c.cfg.Scheme) } return nil } // Stop stops running the consensus algorithm func (c *IotxConsensus) Stop(ctx context.Context) error { logger.Info(). Str("scheme", c.cfg.Scheme). Msg("Stopping IotxConsensus scheme") err := c.scheme.Stop(ctx) if err != nil { return errors.Wrapf(err, "failed to stop scheme %s", c.cfg.Scheme) } return nil } // Metrics returns consensus metrics func (c *IotxConsensus) Metrics() (scheme.ConsensusMetrics, error) { return c.scheme.Metrics() } // HandleBlockPropose handles a proposed block func (c *IotxConsensus) HandleBlockPropose(propose *iproto.ProposePb) error { return c.scheme.HandleBlockPropose(propose) } // HandleEndorse handle an endorse func (c *IotxConsensus) HandleEndorse(endorse *iproto.EndorsePb) error { return c.scheme.HandleEndorse(endorse) } // Scheme returns the scheme instance func (c *IotxConsensus) Scheme() scheme.Scheme { return c.scheme } // GetAddr returns the iotex address func GetAddr(cfg config.Config) *iotxaddress.Address { addr, err := cfg.BlockchainAddress() if err != nil { logger.Panic().Err(err).Msg("Fail to create new consensus") } pk, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey) if err != nil { logger.Panic().Err(err).Msg("Fail to create new consensus") } sk, err := keypair.DecodePrivateKey(cfg.Chain.ProducerPrivKey) if err != nil { logger.Panic().Err(err).Msg("Fail to create new consensus") } return &iotxaddress.Address{ PublicKey: pk, PrivateKey: sk, RawAddress: addr.IotxAddress(), } }
1
13,517
File is not `goimports`-ed (from `goimports`)
iotexproject-iotex-core
go
@@ -40,12 +40,13 @@ public class ClientHarvestRun implements Serializable {
         this.id = id;
     }
 
-    public enum RunResultType { SUCCESS, FAILURE, INPROGRESS };
+    public enum RunResultType { SUCCESS, RUN_FAILED, RUN_IN_PROGRESS, DELETE_FAILED };
 
     private static String RESULT_LABEL_SUCCESS = "SUCCESS";
-    private static String RESULT_LABEL_FAILURE = "FAILED";
-    private static String RESULT_LABEL_INPROGRESS = "IN PROGRESS";
-    private static String RESULT_DELETE_IN_PROGRESS = "DELETE IN PROGRESS";
+    private static String RESULT_LABEL_RUN_FAILED = "RUN FAILED";
+    private static String RESULT_LABEL_RUN_IN_PROGRESS = "RUN IN PROGRESS";
+    private static String RESULT_LABEL_DELETE_IN_PROGRESS = "DELETE IN PROGRESS";
+    private static String RESULT_LABEL_DELETE_FAILED = "DELETE FAILED";
 
     @ManyToOne
     @JoinColumn(nullable = false)
1
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package edu.harvard.iq.dataverse.harvest.client; import java.io.Serializable; import java.util.Date; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; import javax.persistence.Temporal; import javax.persistence.TemporalType; /** * * @author Leonid Andreev * * This is a record of an attempted harvesting client run. (Should it be named * HarvestingClientRunResult instead?) */ @Entity public class ClientHarvestRun implements Serializable { private static final long serialVersionUID = 1L; @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long id; public Long getId() { return id; } public void setId(Long id) { this.id = id; } public enum RunResultType { SUCCESS, FAILURE, INPROGRESS }; private static String RESULT_LABEL_SUCCESS = "SUCCESS"; private static String RESULT_LABEL_FAILURE = "FAILED"; private static String RESULT_LABEL_INPROGRESS = "IN PROGRESS"; private static String RESULT_DELETE_IN_PROGRESS = "DELETE IN PROGRESS"; @ManyToOne @JoinColumn(nullable = false) private HarvestingClient harvestingClient; public HarvestingClient getHarvestingClient() { return harvestingClient; } public void setHarvestingClient(HarvestingClient harvestingClient) { this.harvestingClient = harvestingClient; } private RunResultType harvestResult; public RunResultType getResult() { return harvestResult; } public String getResultLabel() { if (harvestingClient != null && harvestingClient.isDeleteInProgress()) { return RESULT_DELETE_IN_PROGRESS; } if (isSuccess()) { return RESULT_LABEL_SUCCESS; } else if (isFailed()) { return RESULT_LABEL_FAILURE; } else if (isInProgress()) { return RESULT_LABEL_INPROGRESS; } return null; } public String getDetailedResultLabel() { if (harvestingClient != null && harvestingClient.isDeleteInProgress()) { return RESULT_DELETE_IN_PROGRESS; } if (isSuccess()) { String resultLabel = RESULT_LABEL_SUCCESS; resultLabel = resultLabel.concat("; "+harvestedDatasetCount+" harvested, "); resultLabel = resultLabel.concat(deletedDatasetCount+" deleted, "); resultLabel = resultLabel.concat(failedDatasetCount+" failed."); return resultLabel; } else if (isFailed()) { return RESULT_LABEL_FAILURE; } else if (isInProgress()) { return RESULT_LABEL_INPROGRESS; } return null; } public void setResult(RunResultType harvestResult) { this.harvestResult = harvestResult; } public boolean isSuccess() { return RunResultType.SUCCESS == harvestResult; } public void setSuccess() { harvestResult = RunResultType.SUCCESS; } public boolean isFailed() { return RunResultType.FAILURE == harvestResult; } public void setFailed() { harvestResult = RunResultType.FAILURE; } public boolean isInProgress() { return RunResultType.INPROGRESS == harvestResult || (harvestResult == null && startTime != null && finishTime == null); } public void setInProgress() { harvestResult = RunResultType.INPROGRESS; } // Time of this harvest attempt: @Temporal(value = TemporalType.TIMESTAMP) private Date startTime; public Date getStartTime() { return startTime; } public void setStartTime(Date startTime) { this.startTime = startTime; } @Temporal(value = TemporalType.TIMESTAMP) private Date finishTime; public Date getFinishTime() { return finishTime; } public void setFinishTime(Date finishTime) { 
this.finishTime = finishTime; } // Tese are the Dataset counts from that last harvest: // (TODO: do we need to differentiate between *created* (new), and *updated* // harvested datasets? -- L.A. 4.4 private Long harvestedDatasetCount = 0L; private Long failedDatasetCount = 0L; private Long deletedDatasetCount = 0L; public Long getHarvestedDatasetCount() { return harvestedDatasetCount; } public void setHarvestedDatasetCount(Long harvestedDatasetCount) { this.harvestedDatasetCount = harvestedDatasetCount; } public Long getFailedDatasetCount() { return failedDatasetCount; } public void setFailedDatasetCount(Long failedDatasetCount) { this.failedDatasetCount = failedDatasetCount; } public Long getDeletedDatasetCount() { return deletedDatasetCount; } public void setDeletedDatasetCount(Long deletedDatasetCount) { this.deletedDatasetCount = deletedDatasetCount; } @Override public int hashCode() { int hash = 0; hash += (id != null ? id.hashCode() : 0); return hash; } @Override public boolean equals(Object object) { // TODO: Warning - this method won't work in the case the id fields are not set if (!(object instanceof ClientHarvestRun)) { return false; } ClientHarvestRun other = (ClientHarvestRun) object; if ((this.id == null && other.id != null) || (this.id != null && !this.id.equals(other.id))) { return false; } return true; } @Override public String toString() { return "edu.harvard.iq.dataverse.harvest.client.HarvestingClientRun[ id=" + id + " ]"; } }
1
42,422
Not a big deal, but what is the value of adding this status condition for a failure to delete a client? It has some value for a dataverse admin, to know that the last attempt to harvest from a certain server resulted in a failure. Is it really useful to know that an attempt to delete a client failed? - should the startup check simply remove the "delete in progress" flag quietly instead? After all, seeing how the client is still there makes it somewhat clear that the attempt to get rid of it didn't work out, so they should try again? I may be missing some situation where it could actually be useful - so I'm open to hearing it.
IQSS-dataverse
java
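Editor's note on the record above: the patch replaces the three-valued RunResultType with SUCCESS, RUN_FAILED, RUN_IN_PROGRESS and DELETE_FAILED and introduces matching label constants; the review comment questions whether surfacing DELETE_FAILED is worth it versus quietly clearing the delete-in-progress flag at startup. The sketch below only illustrates how the widened enum could map onto the new labels, in the spirit of getResultLabel(); the labelFor() helper is hypothetical and not code from the pull request.

// Self-contained sketch: the widened result type from the patch plus a label mapping
// comparable to ClientHarvestRun.getResultLabel(). Illustrative only.
enum RunResultType { SUCCESS, RUN_FAILED, RUN_IN_PROGRESS, DELETE_FAILED }

final class RunResultLabels {
    static String labelFor(RunResultType result, boolean deleteInProgress) {
        if (deleteInProgress) {
            return "DELETE IN PROGRESS";          // reported while a delete is still running
        }
        if (result == null) {
            return null;                          // no attempt recorded yet
        }
        switch (result) {
            case SUCCESS:         return "SUCCESS";
            case RUN_FAILED:      return "RUN FAILED";
            case RUN_IN_PROGRESS: return "RUN IN PROGRESS";
            case DELETE_FAILED:   return "DELETE FAILED";
            default:              return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(labelFor(RunResultType.DELETE_FAILED, false)); // prints "DELETE FAILED"
    }
}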
@@ -85,7 +85,7 @@ public abstract class BaseWriterFactory<T> implements WriterFactory<T> {
     OutputFile outputFile = file.encryptingOutputFile();
     EncryptionKeyMetadata keyMetadata = file.keyMetadata();
     Map<String, String> properties = table.properties();
-    MetricsConfig metricsConfig = MetricsConfig.fromProperties(properties);
+    MetricsConfig metricsConfig = MetricsConfig.forTable(table);
 
     try {
       switch (dataFileFormat) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.data; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Map; import org.apache.iceberg.FileFormat; import org.apache.iceberg.MetricsConfig; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.avro.Avro; import org.apache.iceberg.deletes.EqualityDeleteWriter; import org.apache.iceberg.deletes.PositionDeleteWriter; import org.apache.iceberg.encryption.EncryptedOutputFile; import org.apache.iceberg.encryption.EncryptionKeyMetadata; import org.apache.iceberg.io.DataWriter; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.io.WriterFactory; import org.apache.iceberg.orc.ORC; import org.apache.iceberg.parquet.Parquet; /** * A base writer factory to be extended by query engine integrations. */ public abstract class BaseWriterFactory<T> implements WriterFactory<T> { private final Table table; private final FileFormat dataFileFormat; private final Schema dataSchema; private final SortOrder dataSortOrder; private final FileFormat deleteFileFormat; private final int[] equalityFieldIds; private final Schema equalityDeleteRowSchema; private final SortOrder equalityDeleteSortOrder; private final Schema positionDeleteRowSchema; protected BaseWriterFactory(Table table, FileFormat dataFileFormat, Schema dataSchema, SortOrder dataSortOrder, FileFormat deleteFileFormat, int[] equalityFieldIds, Schema equalityDeleteRowSchema, SortOrder equalityDeleteSortOrder, Schema positionDeleteRowSchema) { this.table = table; this.dataFileFormat = dataFileFormat; this.dataSchema = dataSchema; this.dataSortOrder = dataSortOrder; this.deleteFileFormat = deleteFileFormat; this.equalityFieldIds = equalityFieldIds; this.equalityDeleteRowSchema = equalityDeleteRowSchema; this.equalityDeleteSortOrder = equalityDeleteSortOrder; this.positionDeleteRowSchema = positionDeleteRowSchema; } protected abstract void configureDataWrite(Avro.DataWriteBuilder builder); protected abstract void configureEqualityDelete(Avro.DeleteWriteBuilder builder); protected abstract void configurePositionDelete(Avro.DeleteWriteBuilder builder); protected abstract void configureDataWrite(Parquet.DataWriteBuilder builder); protected abstract void configureEqualityDelete(Parquet.DeleteWriteBuilder builder); protected abstract void configurePositionDelete(Parquet.DeleteWriteBuilder builder); // TODO: provide ways to configure ORC delete writers once we support them protected abstract void configureDataWrite(ORC.DataWriteBuilder builder); @Override public DataWriter<T> newDataWriter(EncryptedOutputFile file, PartitionSpec spec, StructLike partition) { OutputFile 
outputFile = file.encryptingOutputFile(); EncryptionKeyMetadata keyMetadata = file.keyMetadata(); Map<String, String> properties = table.properties(); MetricsConfig metricsConfig = MetricsConfig.fromProperties(properties); try { switch (dataFileFormat) { case AVRO: Avro.DataWriteBuilder avroBuilder = Avro.writeData(outputFile) .schema(dataSchema) .setAll(properties) .metricsConfig(metricsConfig) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .withSortOrder(dataSortOrder) .overwrite(); configureDataWrite(avroBuilder); return avroBuilder.build(); case PARQUET: Parquet.DataWriteBuilder parquetBuilder = Parquet.writeData(outputFile) .schema(dataSchema) .setAll(properties) .metricsConfig(metricsConfig) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .withSortOrder(dataSortOrder) .overwrite(); configureDataWrite(parquetBuilder); return parquetBuilder.build(); case ORC: ORC.DataWriteBuilder orcBuilder = ORC.writeData(outputFile) .schema(dataSchema) .setAll(properties) .metricsConfig(metricsConfig) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .withSortOrder(dataSortOrder) .overwrite(); configureDataWrite(orcBuilder); return orcBuilder.build(); default: throw new UnsupportedOperationException("Unsupported data file format: " + dataFileFormat); } } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public EqualityDeleteWriter<T> newEqualityDeleteWriter(EncryptedOutputFile file, PartitionSpec spec, StructLike partition) { OutputFile outputFile = file.encryptingOutputFile(); EncryptionKeyMetadata keyMetadata = file.keyMetadata(); Map<String, String> properties = table.properties(); MetricsConfig metricsConfig = MetricsConfig.fromProperties(properties); try { switch (deleteFileFormat) { case AVRO: // TODO: support metrics configs in Avro equality delete writer Avro.DeleteWriteBuilder avroBuilder = Avro.writeDeletes(outputFile) .setAll(properties) .rowSchema(equalityDeleteRowSchema) .equalityFieldIds(equalityFieldIds) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .withSortOrder(equalityDeleteSortOrder) .overwrite(); configureEqualityDelete(avroBuilder); return avroBuilder.buildEqualityWriter(); case PARQUET: Parquet.DeleteWriteBuilder parquetBuilder = Parquet.writeDeletes(outputFile) .setAll(properties) .metricsConfig(metricsConfig) .rowSchema(equalityDeleteRowSchema) .equalityFieldIds(equalityFieldIds) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .withSortOrder(equalityDeleteSortOrder) .overwrite(); configureEqualityDelete(parquetBuilder); return parquetBuilder.buildEqualityWriter(); default: throw new UnsupportedOperationException("Unsupported format for equality deletes: " + deleteFileFormat); } } catch (IOException e) { throw new UncheckedIOException("Failed to create new equality delete writer", e); } } @Override public PositionDeleteWriter<T> newPositionDeleteWriter(EncryptedOutputFile file, PartitionSpec spec, StructLike partition) { OutputFile outputFile = file.encryptingOutputFile(); EncryptionKeyMetadata keyMetadata = file.keyMetadata(); Map<String, String> properties = table.properties(); // TODO: build and pass a correct metrics config for position deletes try { switch (deleteFileFormat) { case AVRO: Avro.DeleteWriteBuilder avroBuilder = Avro.writeDeletes(outputFile) .setAll(properties) .rowSchema(positionDeleteRowSchema) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .overwrite(); configurePositionDelete(avroBuilder); 
return avroBuilder.buildPositionWriter(); case PARQUET: Parquet.DeleteWriteBuilder parquetBuilder = Parquet.writeDeletes(outputFile) .setAll(properties) .rowSchema(positionDeleteRowSchema) .withSpec(spec) .withPartition(partition) .withKeyMetadata(keyMetadata) .overwrite(); configurePositionDelete(parquetBuilder); return parquetBuilder.buildPositionWriter(); default: throw new UnsupportedOperationException("Unsupported format for position deletes: " + deleteFileFormat); } } catch (IOException e) { throw new UncheckedIOException("Failed to create new position delete writer", e); } } protected Schema dataSchema() { return dataSchema; } protected Schema equalityDeleteRowSchema() { return equalityDeleteRowSchema; } protected Schema positionDeleteRowSchema() { return positionDeleteRowSchema; } }
1
33,707
Can we also update the equality delete branch below?
apache-iceberg
java
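Editor's note on the record above: the posted hunk switches newDataWriter() from MetricsConfig.fromProperties(properties) to MetricsConfig.forTable(table), and the review asks for the same substitution in the equality-delete branch. A hypothetical follow-up inside newEqualityDeleteWriter() would look like the sketch below; it simply mirrors the posted change and is not taken from an actual commit.

// sketch of the requested follow-up inside newEqualityDeleteWriter(...), not from a real commit
     Map<String, String> properties = table.properties();
-    MetricsConfig metricsConfig = MetricsConfig.fromProperties(properties);
+    MetricsConfig metricsConfig = MetricsConfig.forTable(table);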
@@ -457,6 +457,11 @@ class StepExecutionContext(PlanExecutionContext, IStepContext):
             if step_output_handle == record.dagster_event.event_specific_data.step_output_handle:
                 return True
 
+        # source is skipped so cannot load
+        for record in self.instance.all_logs(self.run_id, of_type=DagsterEventType.STEP_SKIPPED):
+            if step_output_handle.step_key == record.dagster_event.step_key:
+                return False
+
         # can load from a previous run
         if self._get_source_run_id_from_logs(step_output_handle):
             return True
1
""" This module contains the execution context objects that are internal to the system. Not every property on these should be exposed to random Jane or Joe dagster user so we have a different layer of objects that encode the explicit public API in the user_context module """ from abc import ABC, abstractproperty from typing import TYPE_CHECKING, Any, Dict, Iterable, NamedTuple, Optional, Set, cast from dagster import check from dagster.core.definitions.hook_definition import HookDefinition from dagster.core.definitions.mode import ModeDefinition from dagster.core.definitions.op_definition import OpDefinition from dagster.core.definitions.pipeline_base import IPipeline from dagster.core.definitions.pipeline_definition import PipelineDefinition from dagster.core.definitions.policy import RetryPolicy from dagster.core.definitions.reconstructable import ReconstructablePipeline from dagster.core.definitions.resource_definition import ScopedResourcesBuilder from dagster.core.definitions.solid_definition import SolidDefinition from dagster.core.definitions.step_launcher import StepLauncher from dagster.core.errors import DagsterInvariantViolationError from dagster.core.execution.plan.outputs import StepOutputHandle from dagster.core.execution.plan.step import ExecutionStep from dagster.core.execution.retries import RetryMode from dagster.core.executor.base import Executor from dagster.core.log_manager import DagsterLogManager from dagster.core.storage.io_manager import IOManager from dagster.core.storage.pipeline_run import PipelineRun from dagster.core.system_config.objects import ResolvedRunConfig from dagster.core.types.dagster_type import DagsterType from .input import InputContext from .output import OutputContext, get_output_context if TYPE_CHECKING: from dagster.core.definitions.dependency import Node, NodeHandle from dagster.core.instance import DagsterInstance from dagster.core.execution.plan.plan import ExecutionPlan from dagster.core.definitions.resource_definition import Resources from .hook import HookContext class IPlanContext(ABC): """Context interface to represent run information that does not require access to user code. The information available via this interface is accessible to the system throughout a run. 
""" @abstractproperty def plan_data(self) -> "PlanData": raise NotImplementedError() @property def pipeline(self) -> IPipeline: return self.plan_data.pipeline @property def pipeline_run(self) -> PipelineRun: return self.plan_data.pipeline_run @property def run_id(self) -> str: return self.pipeline_run.run_id @property def run_config(self) -> dict: return self.pipeline_run.run_config @property def pipeline_name(self) -> str: return self.pipeline_run.pipeline_name @property def job_name(self) -> str: return self.pipeline_name @property def instance(self) -> "DagsterInstance": return self.plan_data.instance @property def raise_on_error(self) -> bool: return self.plan_data.raise_on_error @property def retry_mode(self) -> RetryMode: return self.plan_data.retry_mode @property def execution_plan(self): return self.plan_data.execution_plan @abstractproperty def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]: raise NotImplementedError() @property def log(self) -> DagsterLogManager: raise NotImplementedError() @property def logging_tags(self) -> Dict[str, str]: return self.log.logging_metadata.to_tags() def has_tag(self, key: str) -> bool: check.str_param(key, "key") return key in self.log.logging_metadata.pipeline_tags def get_tag(self, key: str) -> Optional[str]: check.str_param(key, "key") return self.log.logging_metadata.pipeline_tags.get(key) class PlanData(NamedTuple): """The data about a run that is available during both orchestration and execution. This object does not contain any information that requires access to user code, such as the pipeline definition and resources. """ pipeline: IPipeline pipeline_run: PipelineRun instance: "DagsterInstance" execution_plan: "ExecutionPlan" raise_on_error: bool = False retry_mode: RetryMode = RetryMode.DISABLED class ExecutionData(NamedTuple): """The data that is available to the system during execution. This object contains information that requires access to user code, such as the pipeline definition and resources. """ scoped_resources_builder: ScopedResourcesBuilder resolved_run_config: ResolvedRunConfig pipeline_def: PipelineDefinition mode_def: ModeDefinition class IStepContext(IPlanContext): """Interface to represent data to be available during either step orchestration or execution.""" @abstractproperty def step(self) -> ExecutionStep: raise NotImplementedError() @abstractproperty def solid_handle(self) -> "NodeHandle": raise NotImplementedError() class PlanOrchestrationContext(IPlanContext): """Context for the orchestration of a run. This context assumes inability to run user code directly. 
""" def __init__( self, plan_data: PlanData, log_manager: DagsterLogManager, executor: Executor, output_capture: Optional[Dict[StepOutputHandle, Any]], resume_from_failure: bool = False, ): self._plan_data = plan_data self._log_manager = log_manager self._executor = executor self._output_capture = output_capture self._resume_from_failure = resume_from_failure @property def plan_data(self) -> PlanData: return self._plan_data @property def reconstructable_pipeline(self) -> ReconstructablePipeline: if not isinstance(self.pipeline, ReconstructablePipeline): raise DagsterInvariantViolationError( "reconstructable_pipeline property must be a ReconstructablePipeline" ) return self.pipeline @property def log(self) -> DagsterLogManager: return self._log_manager @property def executor(self) -> Executor: return self._executor @property def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]: return self._output_capture def for_step(self, step: ExecutionStep) -> "IStepContext": return StepOrchestrationContext( plan_data=self.plan_data, log_manager=self._log_manager.with_tags(**step.logging_tags), executor=self.executor, step=step, output_capture=self.output_capture, ) @property def resume_from_failure(self) -> bool: return self._resume_from_failure class StepOrchestrationContext(PlanOrchestrationContext, IStepContext): """Context for the orchestration of a step. This context assumes inability to run user code directly. Thus, it does not include any resource information. """ def __init__(self, plan_data, log_manager, executor, step, output_capture): super(StepOrchestrationContext, self).__init__( plan_data, log_manager, executor, output_capture ) self._step = step @property def step(self) -> ExecutionStep: return self._step @property def solid_handle(self) -> "NodeHandle": return self.step.solid_handle class PlanExecutionContext(IPlanContext): """Context for the execution of a plan. This context assumes that user code can be run directly, and thus includes resource and information. """ def __init__( self, plan_data: PlanData, execution_data: ExecutionData, log_manager: DagsterLogManager, output_capture: Optional[Dict[StepOutputHandle, Any]] = None, ): self._plan_data = plan_data self._execution_data = execution_data self._log_manager = log_manager self._output_capture = output_capture @property def plan_data(self) -> PlanData: return self._plan_data @property def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]: return self._output_capture def for_step(self, step: ExecutionStep, previous_attempt_count: int = 0) -> IStepContext: return StepExecutionContext( plan_data=self.plan_data, execution_data=self._execution_data, log_manager=self._log_manager.with_tags(**step.logging_tags), step=step, output_capture=self.output_capture, previous_attempt_count=previous_attempt_count, ) @property def pipeline_def(self) -> PipelineDefinition: return self._execution_data.pipeline_def @property def resolved_run_config(self) -> ResolvedRunConfig: return self._execution_data.resolved_run_config @property def scoped_resources_builder(self) -> ScopedResourcesBuilder: return self._execution_data.scoped_resources_builder @property def log(self) -> DagsterLogManager: return self._log_manager def for_type(self, dagster_type: DagsterType) -> "TypeCheckContext": return TypeCheckContext( self.run_id, self.log, self._execution_data.scoped_resources_builder, dagster_type ) class StepExecutionContext(PlanExecutionContext, IStepContext): """Context for the execution of a step. 
This context assumes that user code can be run directly, and thus includes resource and information. """ def __init__( self, plan_data: PlanData, execution_data: ExecutionData, log_manager: DagsterLogManager, step: ExecutionStep, output_capture: Optional[Dict[StepOutputHandle, Any]], previous_attempt_count: int, ): from dagster.core.execution.resources_init import get_required_resource_keys_for_step super(StepExecutionContext, self).__init__( plan_data=plan_data, execution_data=execution_data, log_manager=log_manager, output_capture=output_capture, ) self._step = step self._required_resource_keys = get_required_resource_keys_for_step( plan_data.pipeline.get_definition(), step, plan_data.execution_plan, ) self._resources = execution_data.scoped_resources_builder.build( self._required_resource_keys ) self._previous_attempt_count = previous_attempt_count resources_iter = cast(Iterable, self._resources) step_launcher_resources = [ resource for resource in resources_iter if isinstance(resource, StepLauncher) ] self._step_launcher: Optional[StepLauncher] = None if len(step_launcher_resources) > 1: raise DagsterInvariantViolationError( "Multiple required resources for {described_op} have inherited StepLauncher" "There should be at most one step launcher resource per {node_type}.".format( described_op=self.describe_op(), node_type=self.solid_def.node_type_str ) ) elif len(step_launcher_resources) == 1: self._step_launcher = step_launcher_resources[0] self._step_exception: Optional[BaseException] = None self._step_output_capture: Dict[StepOutputHandle, Any] = {} @property def step(self) -> ExecutionStep: return self._step @property def solid_handle(self) -> "NodeHandle": return self.step.solid_handle @property def required_resource_keys(self) -> Set[str]: return self._required_resource_keys @property def resources(self) -> "Resources": return self._resources @property def step_launcher(self) -> Optional[StepLauncher]: return self._step_launcher @property def solid_def(self) -> SolidDefinition: return self.solid.definition.ensure_solid_def() @property def pipeline_def(self) -> PipelineDefinition: return self._execution_data.pipeline_def @property def mode_def(self) -> ModeDefinition: return self._execution_data.mode_def @property def solid(self) -> "Node": return self.pipeline_def.get_solid(self._step.solid_handle) @property def solid_retry_policy(self) -> Optional[RetryPolicy]: return self.pipeline_def.get_retry_policy_for_handle(self.solid_handle) def describe_op(self): if isinstance(self.solid_def, OpDefinition): return f'op "{str(self.solid_handle)}"' return f'solid "{str(self.solid_handle)}"' def get_io_manager(self, step_output_handle) -> IOManager: step_output = self.execution_plan.get_step_output(step_output_handle) io_manager_key = ( self.pipeline_def.get_solid(step_output.solid_handle) .output_def_named(step_output.name) .io_manager_key ) output_manager = getattr(self.resources, io_manager_key) return check.inst(output_manager, IOManager) def get_output_context(self, step_output_handle) -> OutputContext: return get_output_context( self.execution_plan, self.pipeline_def, self.resolved_run_config, step_output_handle, self._get_source_run_id(step_output_handle), log_manager=self.log, step_context=self, resources=None, version=self.execution_plan.get_version_for_step_output_handle(step_output_handle), ) def for_input_manager( self, name: str, config: Any, metadata: Any, dagster_type: DagsterType, source_handle: Optional[StepOutputHandle] = None, resource_config: Any = None, resources: 
Optional["Resources"] = None, ) -> InputContext: return InputContext( pipeline_name=self.pipeline_def.name, name=name, solid_def=self.solid_def, config=config, metadata=metadata, upstream_output=self.get_output_context(source_handle) if source_handle else None, dagster_type=dagster_type, log_manager=self.log, step_context=self, resource_config=resource_config, resources=resources, ) def for_hook(self, hook_def: HookDefinition) -> "HookContext": from .hook import HookContext return HookContext(self, hook_def) def can_load(self, step_output_handle: StepOutputHandle) -> bool: # Whether IO Manager can load the source # FIXME https://github.com/dagster-io/dagster/issues/3511 # This is a stopgap which asks the instance to check the event logs to find out step skipping from dagster.core.events import DagsterEventType # can load from upstream in the same run for record in self.instance.all_logs(self.run_id, of_type=DagsterEventType.STEP_OUTPUT): if step_output_handle == record.dagster_event.event_specific_data.step_output_handle: return True # can load from a previous run if self._get_source_run_id_from_logs(step_output_handle): return True return False def _get_source_run_id_from_logs(self, step_output_handle: StepOutputHandle) -> Optional[str]: from dagster.core.events import DagsterEventType # walk through event logs to find the right run_id based on the run lineage run_group = self.instance.get_run_group(self.run_id) if run_group is None: check.failed(f"Failed to load run group {self.run_id}") _, runs = run_group run_id_to_parent_run_id = {run.run_id: run.parent_run_id for run in runs} source_run_id = self.pipeline_run.parent_run_id while source_run_id: # note: this would cost N db calls where N = number of parent runs step_output_record = self.instance.all_logs( source_run_id, of_type=DagsterEventType.STEP_OUTPUT ) # if the parent run has yielded an StepOutput event for the given step output, # we find the source run id if [ r for r in step_output_record if r.dagster_event.step_key == step_output_handle.step_key and r.dagster_event.step_output_data.output_name == step_output_handle.output_name ]: return source_run_id else: # else, keep looking backwards source_run_id = run_id_to_parent_run_id.get(source_run_id) # when a fixed path is provided via io manager, it's able to run step subset using an execution # plan when the ascendant outputs were not previously created by dagster-controlled # computations. for example, in backfills, with fixed path io manager, we allow users to # "re-execute" runs with steps where the outputs weren't previously stored by dagster. 
return None def _get_source_run_id(self, step_output_handle: StepOutputHandle) -> Optional[str]: # determine if the step is not selected and if ( # this is re-execution self.pipeline_run.parent_run_id # we are not re-executing the entire pipeline and self.pipeline_run.step_keys_to_execute is not None # this step is not being executed and step_output_handle.step_key not in self.pipeline_run.step_keys_to_execute ): return self._get_source_run_id_from_logs(step_output_handle) else: return self.pipeline_run.run_id def capture_step_exception(self, exception: BaseException): self._step_exception = check.inst_param(exception, "exception", BaseException) @property def step_exception(self) -> Optional[BaseException]: return self._step_exception @property def step_output_capture(self) -> Dict[StepOutputHandle, Any]: return self._step_output_capture @property def previous_attempt_count(self) -> int: return self._previous_attempt_count class TypeCheckContext: """The ``context`` object available to a type check function on a DagsterType. Attributes: log (DagsterLogManager): Centralized log dispatch from user code. resources (Any): An object whose attributes contain the resources available to this op. run_id (str): The id of this job run. """ def __init__( self, run_id: str, log_manager: DagsterLogManager, scoped_resources_builder: ScopedResourcesBuilder, dagster_type: DagsterType, ): self._run_id = run_id self._log = log_manager self._resources = scoped_resources_builder.build(dagster_type.required_resource_keys) @property def resources(self) -> "Resources": return self._resources @property def run_id(self) -> str: return self._run_id @property def log(self) -> DagsterLogManager: return self._log
1
17,040
This doesn't solve the entire problem we talked about yesterday, right? Because it's possible that the step itself didn't skip, but rather that it chose not to yield the output in question?
dagster-io-dagster
py
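The comment above distinguishes a step that was skipped from a step that ran but never yielded a particular output. A minimal sketch of that second case, using Dagster's public op/Out/Output API with hypothetical names (not taken from the file above), might look like this:

from dagster import Out, Output, op

@op(out={"maybe": Out(is_required=False)})
def producer():
    # The step itself executes successfully (so it is not "skipped"),
    # but it can legitimately finish without ever yielding the optional
    # output, in which case no STEP_OUTPUT event exists for that handle.
    produce = False  # stand-in for some runtime decision
    if produce:
        yield Output(1, "maybe")

In this situation a check based only on whether the upstream step skipped would give the wrong answer, which is presumably the gap the reviewer is pointing at.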
@@ -5,6 +5,7 @@ # ------------------------------------------------------------------------- import base64 +from datetime import datetime from hashlib import sha256 import json import os
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- import base64 from hashlib import sha256 import json import os import zlib import sqlalchemy import shared from codeCheckerDBAccess_v6 import ttypes from libcodechecker.logger import get_logger # TODO: This is a cross-subpackage import. from libcodechecker.server.database.run_db_model import * LOG = get_logger('system') def metadata_info(metadata_file): check_commands = [] check_durations = [] if not os.path.isfile(metadata_file): return check_commands, check_durations with open(metadata_file, 'r') as metadata: metadata_dict = json.load(metadata) if 'command' in metadata_dict: check_commands.append(metadata_dict['command']) if 'timestamps' in metadata_dict: check_durations.append( float(metadata_dict['timestamps']['end'] - metadata_dict['timestamps']['begin'])) return check_commands, check_durations def collect_paths_events(report, file_ids, files): """ This function creates the BugPathPos and BugPathEvent objects which belong to a report. report -- A report object from the parsed plist file. file_ids -- A dictionary which maps the file paths to file IDs in the database. files -- A list containing the file paths from the parsed plist file. The order of this list must be the same as in the plist file. """ bug_paths = [] bug_events = [] events = filter(lambda i: i.get('kind') == 'event', report.bug_path) # Create remaining data for bugs and send them to the server. In plist # file the source and target of the arrows are provided as starting and # ending ranges of the arrow. The path A->B->C is given as A->B and # B->C, thus range B is provided twice. So in the loop only target # points of the arrows are stored, and an extra insertion is done for # the source of the first arrow before the loop. report_path = filter(lambda i: i.get('kind') == 'control', report.bug_path) if report_path: start_range = report_path[0]['edges'][0]['start'] start1_line = start_range[0]['line'] start1_col = start_range[0]['col'] start2_line = start_range[1]['line'] start2_col = start_range[1]['col'] source_file_path = files[start_range[1]['file']] bug_paths.append(ttypes.BugPathPos( start1_line, start1_col, start2_line, start2_col, file_ids[source_file_path])) for path in report_path: try: end_range = path['edges'][0]['end'] end1_line = end_range[0]['line'] end1_col = end_range[0]['col'] end2_line = end_range[1]['line'] end2_col = end_range[1]['col'] source_file_path = files[end_range[1]['file']] bug_paths.append(ttypes.BugPathPos( end1_line, end1_col, end2_line, end2_col, file_ids[source_file_path])) except IndexError: # Edges might be empty nothing can be stored. 
continue for event in events: file_path = files[event['location']['file']] bug_events.append(ttypes.BugPathEvent( event['location']['line'], event['location']['col'], event['location']['line'], event['location']['col'], event['message'], file_ids[file_path])) return bug_paths, bug_events def store_bug_events(session, bugevents, report_id): """ """ for i, event in enumerate(bugevents): bpe = BugPathEvent(event.startLine, event.startCol, event.endLine, event.endCol, i, event.msg, event.fileId, report_id) session.add(bpe) def store_bug_path(session, bugpath, report_id): for i, piece in enumerate(bugpath): brp = BugReportPoint(piece.startLine, piece.startCol, piece.endLine, piece.endCol, i, piece.fileId, report_id) session.add(brp) def is_same_event_path(report_id, events, session): """ Checks if the given event path is the same as the one in the events argument. """ try: q = session.query(BugPathEvent) \ .filter(BugPathEvent.report_id == report_id) \ .order_by(BugPathEvent.order) for i, point2 in enumerate(q): if i == len(events): return False point1 = events[i] file1name = os.path.basename(session.query(File). get(point1.fileId).filepath) file2name = os.path.basename(session.query(File). get(point2.file_id).filepath) if point1.startCol != point2.col_begin or \ point1.endCol != point2.col_end or \ file1name != file2name or \ point1.msg != point2.msg: return False return True except Exception as ex: raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.GENERAL, str(ex)) def addCheckerRun(session, command, name, tag, username, run_history_time, version, force): """ Store checker run related data to the database. By default updates the results if name already exists. Using the force flag removes existing analysis results for a run. """ try: LOG.debug("adding checker run") run = session.query(Run).filter(Run.name == name).one_or_none() if run and force: # Clean already collected results. if not run.can_delete: # Deletion is already in progress. msg = "Can't delete " + str(run.id) LOG.debug(msg) raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.DATABASE, msg) LOG.info('Removing previous analysis results ...') session.delete(run) checker_run = Run(name, version, command) session.add(checker_run) session.flush() run_id = checker_run.id elif run: # There is already a run, update the results. run.date = datetime.now() run.command = command run.duration = -1 session.flush() run_id = run.id else: # There is no run create new. checker_run = Run(name, version, command) session.add(checker_run) session.flush() run_id = checker_run.id # Add run to the history. 
LOG.debug("adding run to the history") if tag is not None: run_history = session.query(RunHistory) \ .filter(RunHistory.run_id == run_id, RunHistory.version_tag == tag) \ .one_or_none() if run_history: run_history.version_tag = None session.add(run_history) run_history = RunHistory(run_id, tag, username, run_history_time) session.add(run_history) session.flush() return run_id except Exception as ex: raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.GENERAL, str(ex)) def finishCheckerRun(session, run_id): """ """ try: LOG.debug("Finishing checker run") run = session.query(Run).get(run_id) if not run: return False run.mark_finished() return True except Exception as ex: LOG.error(ex) return False def setRunDuration(session, run_id, duration): """ """ try: run = session.query(Run).get(run_id) if not run: return False run.duration = duration return True except Exception as ex: LOG.error(ex) return false def addReport(session, run_id, file_id, main_section, bugpath, events, detection_status, detection_time, severity_map): """ """ try: checker_name = main_section['check_name'] severity_name = severity_map.get(checker_name, 'UNSPECIFIED') severity = ttypes.Severity._NAMES_TO_VALUES[severity_name] report = Report(run_id, main_section['issue_hash_content_of_line_in_context'], file_id, main_section['description'], checker_name or 'NOT FOUND', main_section['category'], main_section['type'], main_section['location']['line'], main_section['location']['col'], severity, detection_status, detection_time) session.add(report) session.flush() LOG.debug("storing bug path") store_bug_path(session, bugpath, report.id) LOG.debug("storing events") store_bug_events(session, events, report.id) return report.id except Exception as ex: raise shared.ttypes.RequestFailed( shared.ttypes.ErrorCode.GENERAL, str(ex)) def changePathAndEvents(session, run_id, report_path_map): report_ids = report_path_map.keys() session.query(BugPathEvent) \ .filter(BugPathEvent.report_id.in_(report_ids)) \ .delete(synchronize_session=False) session.query(BugReportPoint) \ .filter(BugReportPoint.report_id.in_(report_ids)) \ .delete(synchronize_session=False) for report_id, (bug_path, events) in report_path_map.items(): store_bug_path(session, bug_path, report_id) store_bug_events(session, events, report_id) def addFileContent(session, filepath, content, content_hash, encoding): """ Add the necessary file contents. If the file is already stored in the database then its ID returns. If content_hash in None then this function calculates the content hash. Or if is available at the caller and is provided then it will not be calculated again. This function must not be called between addCheckerRun() and finishCheckerRun() functions when SQLite database is used! addCheckerRun() function opens a transaction which is closed by finishCheckerRun() and since SQLite doesn't support parallel transactions, this API call will wait until the other transactions finish. In the meantime the run adding transaction times out. """ if encoding == ttypes.Encoding.BASE64: content = base64.b64decode(content) if not content_hash: hasher = sha256() hasher.update(content) content_hash = hasher.hexdigest() file_content = session.query(FileContent).get(content_hash) if not file_content: try: compressed_content = zlib.compress(content, zlib.Z_BEST_COMPRESSION) fc = FileContent(content_hash, compressed_content) session.add(fc) session.commit() except sqlalchemy.exc.IntegrityError: # Other transaction moght have added the same content in # the meantime. 
session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath) \ .one_or_none() if not file_record: try: file_record = File(filepath, content_hash) session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: # Other transaction might have added the same file in the # meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath) \ .one_or_none() return file_record.id def addFileRecord(session, filepath, content_hash): """ Add the necessary file record pointing to an already existing content. Returns the added file record id or None, if the content_hash is not found. This function must not be called between addCheckerRun() and finishCheckerRun() functions when SQLite database is used! addCheckerRun() function opens a transaction which is closed by finishCheckerRun() and since SQLite doesn't support parallel transactions, this API call will wait until the other transactions finish. In the meantime the run adding transaction times out. """ file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath) \ .one_or_none() if file_record: return file_record.id try: file_record = File(filepath, content_hash) session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: # Other transaction might have added the same file in the # meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == filepath).one_or_none() return file_record.id if file_record else None
1
9,332
I also do not see any usages of this import. Is this a fix for a crash?
Ericsson-codechecker
c
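For context on the patch above: the old file does reference datetime.now() inside addCheckerRun when an existing run is updated, so without the added import that path would fail at runtime. A minimal, standalone sketch of the failure mode (not CodeChecker code, names are illustrative only):

class _Run:
    date = None

def update_run(run):
    # With no `from datetime import datetime` in scope, this line raises
    # NameError the first time an existing run is re-stored.
    run.date = datetime.now()
    return run

try:
    update_run(_Run())
except NameError as exc:
    print(exc)  # name 'datetime' is not defined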
@@ -794,6 +794,9 @@ func (engine *DockerTaskEngine) AddTask(task *apitask.Task) { // This will update the container desired status task.UpdateDesiredStatus() + // This will update any dependencies for awsvpc network mode before the task is started. + engine.updateTaskENIDependencies(task) + engine.state.AddTask(task) if dependencygraph.ValidDependencies(task, engine.cfg) { engine.startTask(task)
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package engine contains the core logic for managing tasks package engine import ( "context" "fmt" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/api" apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" apitask "github.com/aws/amazon-ecs-agent/agent/api/task" apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/containermetadata" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/data" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" "github.com/aws/amazon-ecs-agent/agent/eventstream" "github.com/aws/amazon-ecs-agent/agent/metrics" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/taskresource" "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" "github.com/aws/amazon-ecs-agent/agent/utils" "github.com/aws/amazon-ecs-agent/agent/utils/retry" utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" dockercontainer "github.com/docker/docker/api/types/container" "github.com/cihub/seelog" "github.com/docker/docker/api/types" "github.com/pkg/errors" ) const ( //DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint DockerEndpointEnvVariable = "DOCKER_HOST" // DockerDefaultEndpoint is the default value for the Docker endpoint DockerDefaultEndpoint = "unix:///var/run/docker.sock" labelPrefix = "com.amazonaws.ecs." labelTaskARN = labelPrefix + "task-arn" labelContainerName = labelPrefix + "container-name" labelTaskDefinitionFamily = labelPrefix + "task-definition-family" labelTaskDefinitionVersion = labelPrefix + "task-definition-version" labelCluster = labelPrefix + "cluster" cniSetupTimeout = 1 * time.Minute cniCleanupTimeout = 30 * time.Second minGetIPBridgeTimeout = time.Second maxGetIPBridgeTimeout = 10 * time.Second getIPBridgeRetryJitterMultiplier = 0.2 getIPBridgeRetryDelayMultiplier = 2 ipamCleanupTmeout = 5 * time.Second minEngineConnectRetryDelay = 2 * time.Second maxEngineConnectRetryDelay = 200 * time.Second engineConnectRetryJitterMultiplier = 0.20 engineConnectRetryDelayMultiplier = 1.5 // logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs. 
logDriverTypeFirelens = "awsfirelens" logDriverTypeFluentd = "fluentd" logDriverTag = "tag" logDriverFluentdAddress = "fluentd-address" dataLogDriverPath = "/data/firelens/" logDriverAsyncConnect = "fluentd-async-connect" logDriverSubSecondPrecision = "fluentd-sub-second-precision" dataLogDriverSocketPath = "/socket/fluent.sock" socketPathPrefix = "unix://" // fluentTagDockerFormat is the format for the log tag, which is "containerName-firelens-taskID" fluentTagDockerFormat = "%s-firelens-%s" // Environment variables are needed for firelens fluentNetworkHost = "FLUENT_HOST" fluentNetworkPort = "FLUENT_PORT" FluentNetworkPortValue = "24224" FluentAWSVPCHostValue = "127.0.0.1" defaultMonitorExecAgentsInterval = 15 * time.Minute ) // DockerTaskEngine is a state machine for managing a task and its containers // in ECS. // // DockerTaskEngine implements an abstraction over the DockerGoClient so that // it does not have to know about tasks, only containers // The DockerTaskEngine interacts with Docker to implement a TaskEngine type DockerTaskEngine struct { // implements TaskEngine cfg *config.Config ctx context.Context initialized bool mustInitLock sync.Mutex // state stores all tasks this task engine is aware of, including their // current state and mappings to/from dockerId and name. // This is used to checkpoint state to disk so tasks may survive agent // failures or updates state dockerstate.TaskEngineState managedTasks map[string]*managedTask taskStopGroup *utilsync.SequentialWaitGroup events <-chan dockerapi.DockerContainerChangeEvent stateChangeEvents chan statechange.Event client dockerapi.DockerClient dataClient data.Client cniClient ecscni.CNIClient containerChangeEventStream *eventstream.EventStream stopEngine context.CancelFunc // tasksLock is a mutex that the task engine must acquire before changing // any task's state which it manages. Since this is a lock that encompasses // all tasks, it must not acquire it for any significant duration // The write mutex should be taken when adding and removing tasks from managedTasks. tasksLock sync.RWMutex credentialsManager credentials.Manager _time ttime.Time _timeOnce sync.Once imageManager ImageManager containerStatusToTransitionFunction map[apicontainerstatus.ContainerStatus]transitionApplyFunc metadataManager containermetadata.Manager // taskSteadyStatePollInterval is the duration that a managed task waits // once the task gets into steady state before polling the state of all of // the task's containers to re-evaluate if the task is still in steady state // This is set to defaultTaskSteadyStatePollInterval in production code. // This can be used by tests that are looking to ensure that the steady state // verification logic gets executed to set it to a low interval taskSteadyStatePollInterval time.Duration taskSteadyStatePollIntervalJitter time.Duration resourceFields *taskresource.ResourceFields // handleDelay is a function used to delay cleanup. Implementation is // swappable for testing handleDelay func(duration time.Duration) monitorExecAgentsTicker *time.Ticker execCmdMgr execcmd.Manager monitorExecAgentsInterval time.Duration } // NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine. // The distinction between created and initialized is that when created it may // be serialized/deserialized, but it will not communicate with docker until it // is also initialized. 
func NewDockerTaskEngine(cfg *config.Config, client dockerapi.DockerClient, credentialsManager credentials.Manager, containerChangeEventStream *eventstream.EventStream, imageManager ImageManager, state dockerstate.TaskEngineState, metadataManager containermetadata.Manager, resourceFields *taskresource.ResourceFields, execCmdMgr execcmd.Manager) *DockerTaskEngine { dockerTaskEngine := &DockerTaskEngine{ cfg: cfg, client: client, dataClient: data.NewNoopClient(), state: state, managedTasks: make(map[string]*managedTask), taskStopGroup: utilsync.NewSequentialWaitGroup(), stateChangeEvents: make(chan statechange.Event), credentialsManager: credentialsManager, containerChangeEventStream: containerChangeEventStream, imageManager: imageManager, cniClient: ecscni.NewClient(cfg.CNIPluginsPath), metadataManager: metadataManager, taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval, taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter, resourceFields: resourceFields, handleDelay: time.Sleep, execCmdMgr: execCmdMgr, monitorExecAgentsInterval: defaultMonitorExecAgentsInterval, } dockerTaskEngine.initializeContainerStatusToTransitionFunction() return dockerTaskEngine } func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() { containerStatusToTransitionFunction := map[apicontainerstatus.ContainerStatus]transitionApplyFunc{ apicontainerstatus.ContainerPulled: engine.pullContainer, apicontainerstatus.ContainerCreated: engine.createContainer, apicontainerstatus.ContainerRunning: engine.startContainer, apicontainerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources, apicontainerstatus.ContainerStopped: engine.stopContainer, } engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction } // ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1 // Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718) // Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks. var ImagePullDeleteLock sync.RWMutex // UnmarshalJSON restores a previously marshaled task-engine state from json func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error { return engine.state.UnmarshalJSON(data) } // MarshalJSON marshals into state directly func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) { return engine.state.MarshalJSON() } // Init initializes a DockerTaskEngine such that it may communicate with docker // and operate normally. // This function must be called before any other function, except serializing and deserializing, can succeed without error. func (engine *DockerTaskEngine) Init(ctx context.Context) error { derivedCtx, cancel := context.WithCancel(ctx) engine.stopEngine = cancel engine.ctx = derivedCtx // Open the event stream before we sync state so that e.g. if a container // goes from running to stopped after we sync with it as "running" we still // have the "went to stopped" event pending so we can be up to date. 
err := engine.openEventstream(derivedCtx) if err != nil { return err } engine.synchronizeState() // Now catch up and start processing new events per normal go engine.handleDockerEvents(derivedCtx) engine.initialized = true go engine.startPeriodicExecAgentsMonitoring(derivedCtx) return nil } func (engine *DockerTaskEngine) startPeriodicExecAgentsMonitoring(ctx context.Context) { engine.monitorExecAgentsTicker = time.NewTicker(engine.monitorExecAgentsInterval) for { select { case <-engine.monitorExecAgentsTicker.C: go engine.monitorExecAgentProcesses(ctx) case <-ctx.Done(): engine.monitorExecAgentsTicker.Stop() return } } } func (engine *DockerTaskEngine) monitorExecAgentProcesses(ctx context.Context) { // TODO: [ecs-exec]add jitter between containers to not overload docker with top calls engine.tasksLock.RLock() defer engine.tasksLock.RUnlock() for _, mTask := range engine.managedTasks { task := mTask.Task if task.GetKnownStatus() != apitaskstatus.TaskRunning { continue } for _, c := range task.Containers { if execcmd.IsExecEnabledContainer(c) { if ma, _ := c.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed { go engine.monitorExecAgentRunning(ctx, mTask, c) } } } } } func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context, mTask *managedTask, c *apicontainer.Container) { if !c.IsRunning() { return } task := mTask.Task dockerID, err := engine.getDockerID(task, c) if err != nil { seelog.Errorf("Task engine [%s]: Could not retrieve docker id for container", task.Arn) return } // Sleeping here so that all the containers do not call inspect/start exec agent process // at the same time. // The max sleep is 50% of the monitor interval to allow enough buffer time // to finish monitoring. // This is inspired from containers streaming stats from Docker. time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2)) status, err := engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID) if err != nil { seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err) mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent cannot be restarted") } if status == execcmd.Restarted { mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent restarted") } } // MustInit blocks and retries until an engine can be initialized. func (engine *DockerTaskEngine) MustInit(ctx context.Context) { if engine.initialized { return } engine.mustInitLock.Lock() defer engine.mustInitLock.Unlock() errorOnce := sync.Once{} taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay, engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier) retry.RetryWithBackoff(taskEngineConnectBackoff, func() error { if engine.initialized { return nil } err := engine.Init(ctx) if err != nil { errorOnce.Do(func() { seelog.Errorf("Task engine: could not connect to docker daemon: %v", err) }) } return err }) } // SetDataClient sets the saver that is used by the DockerTaskEngine. func (engine *DockerTaskEngine) SetDataClient(client data.Client) { engine.dataClient = client } // Shutdown makes a best-effort attempt to cleanup after the task engine. // This should not be relied on for anything more complicated than testing. 
func (engine *DockerTaskEngine) Shutdown() { engine.stopEngine() engine.Disable() } // Disable prevents this engine from managing any additional tasks. func (engine *DockerTaskEngine) Disable() { engine.tasksLock.Lock() } // isTaskManaged checks if task for the corresponding arn is present func (engine *DockerTaskEngine) isTaskManaged(arn string) bool { engine.tasksLock.RLock() defer engine.tasksLock.RUnlock() _, ok := engine.managedTasks[arn] return ok } // synchronizeState explicitly goes through each docker container stored in // "state" and updates its KnownStatus appropriately, as well as queueing up // events to push upstream. It also initializes some fields of task resources and eni attachments that won't be populated // from loading state file. func (engine *DockerTaskEngine) synchronizeState() { engine.tasksLock.Lock() defer engine.tasksLock.Unlock() imageStates := engine.state.AllImageStates() if len(imageStates) != 0 { engine.imageManager.AddAllImageStates(imageStates) } eniAttachments := engine.state.AllENIAttachments() for _, eniAttachment := range eniAttachments { timeoutFunc := func() { eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress) if !ok { seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress) return } if !eniAttachment.IsSent() { seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record %s", eniAttachment.String()) engine.removeENIAttachmentData(eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } err := eniAttachment.Initialize(timeoutFunc) if err != nil { // The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired // attachment from state. seelog.Warnf("ENI attachment has expired. Removing it from state. %s", eniAttachment.String()) engine.removeENIAttachmentData(eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } tasks := engine.state.AllTasks() tasksToStart := engine.filterTasksToStartUnsafe(tasks) for _, task := range tasks { task.InitializeResources(engine.resourceFields) engine.saveTaskData(task) } for _, task := range tasksToStart { engine.startTask(task) } } // filterTasksToStartUnsafe filters only the tasks that need to be started after // the agent has been restarted. It also synchronizes states of all of the containers // in tasks that need to be started. func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task { var tasksToStart []*apitask.Task for _, task := range tasks { conts, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { // task hasn't started processing, no need to check container status tasksToStart = append(tasksToStart, task) continue } for _, cont := range conts { engine.synchronizeContainerStatus(cont, task) engine.saveDockerContainerData(cont) // persist the container with the updated information. 
} tasksToStart = append(tasksToStart, task) // Put tasks that are stopped by acs but hasn't been stopped in wait group if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 { engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1) } } return tasksToStart } // updateContainerMetadata sets the container metadata from the docker inspect func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) { container.SetCreatedAt(metadata.CreatedAt) container.SetStartedAt(metadata.StartedAt) container.SetFinishedAt(metadata.FinishedAt) // Set the labels if it's not set if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 { container.SetLabels(metadata.Labels) } // Update volume for empty volume container if metadata.Volumes != nil { if container.IsInternal() { task.UpdateMountPoints(container, metadata.Volumes) } else { container.SetVolumes(metadata.Volumes) } } // Set Exitcode if it's not set if metadata.ExitCode != nil { container.SetKnownExitCode(metadata.ExitCode) } // Set port mappings if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 { container.SetKnownPortBindings(metadata.PortBindings) } // update the container health information if container.HealthStatusShouldBeReported() { container.SetHealthStatus(metadata.Health) } container.SetNetworkMode(metadata.NetworkMode) container.SetNetworkSettings(metadata.NetworkSettings) } // synchronizeContainerStatus checks and updates the container status with docker func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) { if container.DockerID == "" { seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s", task.Arn, container.DockerName) // Figure out the dockerid describedContainer, err := engine.client.InspectContainer(engine.ctx, container.DockerName, dockerclient.InspectContainerTimeout) if err != nil { seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v", task.Arn, container.DockerName, err) } else { // update the container metadata in case the container was created during agent restart metadata := dockerapi.MetadataFromContainer(describedContainer) updateContainerMetadata(&metadata, container.Container, task) container.DockerID = describedContainer.ID container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State)) // update mappings that need dockerid engine.state.AddContainer(container, task) err := engine.imageManager.RecordContainerReference(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", task.Arn, err) } } return } currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID) if metadata.Error != nil { currentState = apicontainerstatus.ContainerStopped // If this is a Docker API error if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName { seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v", task.Arn, container.DockerID, container.DockerName, metadata.Error) if !container.Container.KnownTerminal() { container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{}) err := engine.imageManager.RemoveContainerReferenceFromImageState(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: could not remove container reference for image 
state %s: %v", container.Container.Image, err) } } } else { // If this is a container state error updateContainerMetadata(&metadata, container.Container, task) container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error) } } else { // update the container metadata in case the container status/metadata changed during agent restart updateContainerMetadata(&metadata, container.Container, task) err := engine.imageManager.RecordContainerReference(container.Container) if err != nil { seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", task.Arn, err) } if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.Container.IsMetadataFileUpdated() { go engine.updateMetadataFile(task, container) } } if currentState > container.Container.GetKnownStatus() { // update the container known status container.Container.SetKnownStatus(currentState) } // Update task ExecutionStoppedAt timestamp task.RecordExecutionStoppedAt(container.Container) } // checkTaskState inspects the state of all containers within a task and writes // their state to the managed task's container channel. func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")() for _, container := range task.Containers { dockerID, err := engine.getDockerID(task, container) if err != nil { continue } status, metadata := engine.client.DescribeContainer(engine.ctx, dockerID) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: status, DockerContainerMetadata: metadata, }, }) } } } // sweepTask deletes all the containers associated with a task func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) { for _, cont := range task.Containers { err := engine.removeContainer(task, cont) if err != nil { seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v", task.Arn, cont.Name, err) } // Internal container(created by ecs-agent) state isn't recorded if cont.IsInternal() { continue } err = engine.imageManager.RemoveContainerReferenceFromImageState(cont) if err != nil { seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v", task.Arn, cont.Name, err) } } // Clean metadata directory for task if engine.cfg.ContainerMetadataEnabled.Enabled() { err := engine.metadataManager.Clean(task.Arn) if err != nil { seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err) } } } var removeAll = os.RemoveAll func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) { for _, resource := range task.GetResources() { err := resource.Cleanup() if err != nil { seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v", task.Arn, resource.GetName(), err) } else { seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn, resource.GetName()) } } if execcmd.IsExecEnabledTask(task) { // cleanup host exec agent log dirs if tID, err := task.GetID(); err != nil { seelog.Warnf("Task Engine[%s]: error getting task ID for ExecAgent logs cleanup: %v", task.Arn, err) } else { if err := removeAll(filepath.Join(execcmd.ECSAgentExecLogDir, tID)); err != nil { seelog.Warnf("Task Engine[%s]: unable to remove ExecAgent host logs for task: %v", task.Arn, err) } } } // Now remove ourselves from the global state and cleanup channels engine.tasksLock.Lock() 
engine.state.RemoveTask(task) taskENIs := task.GetTaskENIs() for _, taskENI := range taskENIs { // ENIs that exist only as logical associations on another interface do not have // attachments that need to be removed. if taskENI.IsStandardENI() { seelog.Debugf("Task engine [%s]: removing eni %s from agent state", task.Arn, taskENI.ID) engine.removeENIAttachmentData(taskENI.MacAddress) engine.state.RemoveENIAttachment(taskENI.MacAddress) } else { seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state", task.Arn, taskENI.ID) } } // Remove task and container data from database. engine.removeTaskData(task) seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn) delete(engine.managedTasks, task.Arn) engine.tasksLock.Unlock() } func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) { event, err := api.NewTaskStateChangeEvent(task, reason) if err != nil { seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err) return } seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String()) engine.stateChangeEvents <- event } // startTask creates a managedTask construct to track the task and then begins // pushing it towards its desired state when allowed startTask is protected by // the tasksLock lock of 'AddTask'. It should not be called from anywhere // else and should exit quickly to allow AddTask to do more work. func (engine *DockerTaskEngine) startTask(task *apitask.Task) { // Create a channel that may be used to communicate with this task, survey // what tasks need to be waited for for this one to start, and then spin off // a goroutine to oversee this task thisTask := engine.newManagedTask(task) thisTask._time = engine.time() go thisTask.overseeTask() } func (engine *DockerTaskEngine) time() ttime.Time { engine._timeOnce.Do(func() { if engine._time == nil { engine._time = &ttime.DefaultTime{} } }) return engine._time } // openEventstream opens, but does not consume, the docker event stream func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error { events, err := engine.client.ContainerEvents(ctx) if err != nil { return err } engine.events = events return nil } // handleDockerEvents must be called after openEventstream; it processes each // event that it reads from the docker eventstream func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) { for { select { case <-ctx.Done(): return case event := <-engine.events: engine.handleDockerEvent(event) } } } // handleDockerEvent is the entrypoint for task modifications originating with // events occurring through Docker, outside the task engine itself. // handleDockerEvent is responsible for taking an event that correlates to a // container and placing it in the context of the task to which that container // belongs. 
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) { seelog.Debugf("Task engine: handling a docker event: %s", event.String()) task, ok := engine.state.TaskByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task", event.DockerID) return } cont, ok := engine.state.ContainerByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container", event.DockerID) return } // Container health status change does not affect the container status // no need to process this in task manager if event.Type == apicontainer.ContainerHealthEvent { if cont.Container.HealthStatusShouldBeReported() { seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v", cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health) cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health) } return } engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if !ok { seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s", task.Arn, event.String()) return } seelog.Debugf("Task engine [%s]: writing docker event to the task: %s", task.Arn, event.String()) managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event}) seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s", task.Arn, event.String()) } // StateChangeEvents returns channels to read task and container state changes. These // changes should be read as soon as possible as them not being read will block // processing the task referenced by the event. func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event { return engine.stateChangeEvents } // AddTask starts tracking a task func (engine *DockerTaskEngine) AddTask(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")() err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager, engine.resourceFields, engine.client, engine.ctx) if err != nil { seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) engine.emitTaskEvent(task, err.Error()) return } engine.tasksLock.Lock() defer engine.tasksLock.Unlock() existingTask, exists := engine.state.TaskByArn(task.Arn) if !exists { // This will update the container desired status task.UpdateDesiredStatus() engine.state.AddTask(task) if dependencygraph.ValidDependencies(task, engine.cfg) { engine.startTask(task) } else { seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) err := TaskDependencyError{task.Arn} engine.emitTaskEvent(task, err.Error()) } return } // Update task engine.updateTaskUnsafe(existingTask, task) } // ListTasks returns the tasks currently managed by the DockerTaskEngine func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) { return engine.state.AllTasks(), nil } // GetTaskByArn returns the task identified by that ARN func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) { return engine.state.TaskByArn(arn) } func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) 
dockerapi.DockerContainerMetadata { switch container.Type { case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause: // pause images are managed at startup return dockerapi.DockerContainerMetadata{} } if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) { // Record the pullStoppedAt timestamp defer func() { timestamp := engine.time().Now() task.SetPullStoppedAt(timestamp) }() seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name) return engine.concurrentPull(task, container) } // No pull image is required, the cached image will be used. // Add the container that uses the cached image to the pulled container state. dockerContainer := &apicontainer.DockerContainer{ Container: container, } engine.state.AddPulledContainer(dockerContainer, task) // No pull image is required, just update container reference and use cached image. engine.updateContainerReference(false, container, task.Arn) // Return the metadata without any error return dockerapi.DockerContainerMetadata{Error: nil} } // imagePullRequired returns true if pulling image is required, or return false if local image cache // should be used, by inspecting the agent pull behavior variable defined in config. The caller has // to make sure the container passed in is not an internal container. func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType, container *apicontainer.Container, taskArn string) bool { switch imagePullBehavior { case config.ImagePullOnceBehavior: // If this image has been pulled successfully before, don't pull the image, // otherwise pull the image as usual, regardless whether the image exists or not // (the image can be prepopulated with the AMI and never be pulled). imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && imageState.GetPullSucceeded() { seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again", taskArn, container.Image, container.Name) return false } return true case config.ImagePullPreferCachedBehavior: // If the behavior is prefer cached, don't pull if we found cached image // by inspecting the image. 
_, err := engine.client.InspectImage(container.Image) if err != nil { return true } seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s", taskArn, container.Image, container.Name) return false default: // Need to pull the image for always and default agent pull behavior return true } } func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s", task.Arn, container.Image, container.Name) ImagePullDeleteLock.RLock() seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s", task.Arn, container.Image, container.Name) defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s", task.Arn, container.Image, container.Name) defer ImagePullDeleteLock.RUnlock() // Record the task pull_started_at timestamp pullStart := engine.time().Now() ok := task.SetPullStartedAt(pullStart) if ok { seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s", task.Arn, pullStart) } metadata := engine.pullAndUpdateContainerReference(task, container) if metadata.Error == nil { seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s", task.Arn, container.Image, container.Name, time.Since(pullStart).String()) } else { seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v", task.Arn, container.Image, container.Name, metadata.Error) } return metadata } func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { // If a task is blocked here for some time, and before it starts pulling image, // the task's desired status is set to stopped, then don't pull the image if task.GetDesiredStatus() == apitaskstatus.TaskStopped { seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s", task.Arn, container.Image, container.Name) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}} } // Set the credentials for pull from ECR if necessary if container.ShouldPullWithExecutionRole() { executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID()) if !ok { seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullECRContainerError{ FromError: errors.New("engine ecr credentials: not found"), }, } } iamCredentials := executionCredentials.GetIAMRoleCredentials() container.SetRegistryAuthCredentials(iamCredentials) // Clean up the ECR pull credentials after pulling defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{}) } // Apply registry auth data from ASM if required if container.ShouldPullWithASMAuth() { if err := task.PopulateASMAuthData(container); err != nil { seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullContainerAuthError{ FromError: errors.New("engine docker private registry credentials: not found"), }, } } defer 
container.SetASMDockerAuthConfig(types.AuthConfig{}) } metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, engine.cfg.ImagePullTimeout) // Don't add internal images(created by ecs-agent) into imagemanger state if container.IsInternal() { return metadata } pullSucceeded := metadata.Error == nil findCachedImage := false if !pullSucceeded { // If Agent failed to pull an image when // 1. DependentContainersPullUpfront is enabled // 2. ImagePullBehavior is not set to always // search the image in local cached images if engine.cfg.DependentContainersPullUpfront.Enabled() && engine.cfg.ImagePullBehavior != config.ImagePullAlwaysBehavior { if _, err := engine.client.InspectImage(container.Image); err != nil { seelog.Errorf("Task engine [%s]: failed to find cached image %s for container %s", task.Arn, container.Image, container.Name) // Stop the task if the container is an essential container, // and the image is not available in both remote and local caches if container.IsEssential() { task.SetDesiredStatus(apitaskstatus.TaskStopped) engine.emitTaskEvent(task, fmt.Sprintf("%s: %s", metadata.Error.ErrorName(), metadata.Error.Error())) } return dockerapi.DockerContainerMetadata{Error: metadata.Error} } seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s", task.Arn, container.Image, container.Name) findCachedImage = true } } if pullSucceeded || findCachedImage { dockerContainer := &apicontainer.DockerContainer{ Container: container, } engine.state.AddPulledContainer(dockerContainer, task) } engine.updateContainerReference(pullSucceeded, container, task.Arn) return metadata } func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) { err := engine.imageManager.RecordContainerReference(container) if err != nil { seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v", taskArn, err) } imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && pullSucceeded { // Only need to update the pullSucceeded flag of the image state when its not yet set to true. if !imageState.GetPullSucceeded() { imageState.SetPullSucceeded(true) err = engine.dataClient.SaveImageState(imageState) if err != nil { seelog.Warnf("Task engine [%s]: unable to save image state: %v", taskArn, err) } } } engine.state.AddImageState(imageState) } func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } dockerContainerName := "" containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { containerMap = make(map[string]*apicontainer.DockerContainer) } else { // looking for container that has docker name but not created for _, v := range containerMap { if v.Container.Name == container.Name { dockerContainerName = v.DockerName break } } } // Resolve HostConfig // we have to do this in create, not start, because docker no longer handles // merging create config with start hostconfig the same; e.g. 
memory limits // get lost dockerClientVersion, versionErr := client.APIVersion() if versionErr != nil { return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}} } hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg) if hcerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)} } if container.AWSLogAuthExecutionRole() { err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } firelensConfig := container.GetFirelensConfig() if firelensConfig != nil { err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } cerr := task.PopulateSecretLogOptionsToFirelensContainer(container) if cerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)} } if firelensConfig.Type == firelens.FirelensConfigTypeFluentd { // For fluentd router, needs to specify FLUENT_UID to root in order for the fluentd process to access // the socket created by Docker. container.MergeEnvironmentVariables(map[string]string{ "FLUENT_UID": "0", }) } } // If the container is using a special log driver type "awsfirelens", it means the container wants to use // the firelens container to send logs. In this case, override the log driver type to be fluentd // and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container. // Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge // and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/. 
if hostConfig.LogConfig.Type == logDriverTypeFirelens { hostConfig.LogConfig = getFirelensLogConfig(task, container, hostConfig, engine.cfg) if task.IsNetworkModeAWSVPC() { container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: FluentAWSVPCHostValue, fluentNetworkPort: FluentNetworkPortValue, }) } else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode { ipAddress, ok := getContainerHostIP(task.GetFirelensContainer().GetNetworkSettings()) if !ok { err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)} } container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: ipAddress, fluentNetworkPort: FluentNetworkPortValue, }) } } //Apply the log driver secret into container's LogConfig and Env secrets to container.Environment hasSecretAsEnvOrLogDriver := func(s apicontainer.Secret) bool { return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver } if container.HasSecret(hasSecretAsEnvOrLogDriver) { err := task.PopulateSecrets(hostConfig, container) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } // Populate credentialspec resource if container.RequiresCredentialSpec() { seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn) var credSpecResource *credentialspec.CredentialSpecResource resource, ok := task.GetCredentialSpecResource() if !ok || len(resource) <= 0 { resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)} } credSpecResource = resource[0].(*credentialspec.CredentialSpecResource) containerCredSpec, err := container.GetCredentialSpec() if err == nil && containerCredSpec != "" { // CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec) if err != nil || desiredCredSpecInjection == "" { missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)} } // Inject containers' hostConfig.SecurityOpt with the credentialspec resource seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection) if len(hostConfig.SecurityOpt) == 0 { hostConfig.SecurityOpt = []string{desiredCredSpecInjection} } else { for idx, opt := range hostConfig.SecurityOpt { if strings.HasPrefix(opt, "credentialspec:") { hostConfig.SecurityOpt[idx] = desiredCredSpecInjection } } } } else { emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)} } } if container.ShouldCreateWithEnvFiles() { err := task.MergeEnvVarsFromEnvfiles(container) if err != nil { seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name) return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } if execcmd.IsExecEnabledContainer(container) { tID, err := task.GetID() if err != nil { herr := &apierrors.HostConfigError{Msg: err.Error()} return 
dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)} } err = engine.execCmdMgr.InitializeContainer(tID, container, hostConfig) if err != nil { seelog.Warnf("Exec Agent initialization: %v . Continuing to start container without enabling exec feature.", err) // Emit a managedagent state chnage event if exec agent initialization fails engine.tasksLock.RLock() mTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", err)) } else { seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name) } } } config, err := task.DockerConfig(container, dockerClientVersion) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } // Augment labels with some metadata from the agent. Explicitly do this last // such that it will always override duplicates in the provided raw config // data. config.Labels[labelTaskARN] = task.Arn config.Labels[labelContainerName] = container.Name config.Labels[labelTaskDefinitionFamily] = task.Family config.Labels[labelTaskDefinitionVersion] = task.Version config.Labels[labelCluster] = engine.cfg.Cluster if dockerContainerName == "" { // only alphanumeric and hyphen characters are allowed reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+") name := reInvalidChars.ReplaceAllString(container.Name, "") dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex() // Pre-add the container in case we stop before the next, more useful, // AddContainer call. This ensures we have a way to get the container if // we die before 'createContainer' returns because we can inspect by // name engine.state.AddContainer(&apicontainer.DockerContainer{ DockerName: dockerContainerName, Container: container, }, task) seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s", task.Arn, container.Name, dockerContainerName) } // Create metadata directory and file then populate it with common metadata of all containers of this task // Afterwards add this directory to the container's mounts if file creation was successful if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout) if infoErr != nil { seelog.Warnf("Task engine [%s]: unable to get docker info : %v", task.Arn, infoErr) } mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions) if mderr != nil { seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v", task.Arn, container.Name, mderr) } } createContainerBegin := time.Now() metadata := client.CreateContainer(engine.ctx, config, hostConfig, dockerContainerName, engine.cfg.ContainerCreateTimeout) if metadata.DockerID != "" { seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s", task.Arn, container.Name, metadata.DockerID) dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID, DockerName: dockerContainerName, Container: container} engine.state.AddContainer(dockerContainer, task) engine.saveDockerContainerData(dockerContainer) } container.SetLabels(config.Labels) seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s", task.Arn, container.Name, metadata.DockerID, 
time.Since(createContainerBegin)) container.SetRuntimeID(metadata.DockerID) return metadata } func getFirelensLogConfig(task *apitask.Task, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig { fields := strings.Split(task.Arn, "/") taskID := fields[len(fields)-1] tag := fmt.Sprintf(fluentTagDockerFormat, container.Name, taskID) fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath) logConfig := hostConfig.LogConfig logConfig.Type = logDriverTypeFluentd logConfig.Config = make(map[string]string) logConfig.Config[logDriverTag] = tag logConfig.Config[logDriverFluentdAddress] = fluentd logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true) logConfig.Config[logDriverSubSecondPrecision] = strconv.FormatBool(true) seelog.Debugf("Applying firelens log config for container %s: %v", container.Name, logConfig) return logConfig } func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID()) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } dockerID, err := engine.getDockerID(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{ FromError: err, }, } } startContainerBegin := time.Now() dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout) if dockerContainerMD.Error != nil { return dockerContainerMD } seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s", task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin)) // Get metadata through container inspection and available task information then write this to the metadata file // Performs this in the background to avoid delaying container start // TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and // add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { go func() { err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name) if err != nil { seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, container.Name, err) return } container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, container.Name) }() } // If container is a firelens container, fluent host is needed to be added to the environment variable for the task. // For the supported network mode - bridge and awsvpc, the awsvpc take the host 127.0.0.1 but in bridge mode, // there is a need to wait for the IP to be present before the container using the firelens can be created. 
if container.GetFirelensConfig() != nil { if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) { _, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings) if !gotContainerIP { getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier) contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute) defer cancel() err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error { inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID, dockerclient.InspectContainerTimeout) if err != nil { return err } _, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings) if gotIPBridge { dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings return nil } else { return errors.New("Bridge IP not available to use for firelens") } }) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{FromError: err}, } } } } } if execcmd.IsExecEnabledContainer(container) { if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed { reason := "ExecuteCommandAgent started" if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil { reason = err.Error() seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err) } engine.tasksLock.RLock() mTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() // whether we started or failed to start, we'll want to emit a state change event // redundant state change events like RUNNING->RUNNING are allowed if ok { mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, reason) } else { seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name) } } } // On Windows, we need to invoke CNI plugins for all containers // invokePluginsForContainer will return nil for other platforms if dockerContainerMD.Error == nil && task.IsNetworkModeAWSVPC() && !container.IsInternal() { err := engine.invokePluginsForContainer(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrapf(err, "startContainer: cni plugin invocation failed"), }, } } } return dockerContainerMD } func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: setting up container resources for container [%s]", task.Arn, container.Name) containerInspectOutput, err := engine.inspectContainer(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: cannot setup task network namespace due to error inspecting pause container"), }, } } task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid)) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: unable to build cni configuration"), }, } } // 
Invoke the libcni to config the network namespace for the container result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout) if err != nil { seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v", task.Arn, err) return dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, Error: ContainerNetworkingError{errors.Wrap(err, "container resource provisioning: failed to setup network namespace")}, } } // This is the IP of the task assigned on the bridge for IAM Task roles if result != nil { taskIP := result.IPs[0].Address.IP.String() seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP) engine.state.AddTaskIPAddress(taskIP, task.Arn) task.SetLocalIPAddress(taskIP) engine.saveTaskData(task) } return dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, } } // checkTearDownPauseContainer idempotently tears down the pause container network when the pause container's known //or desired status is stopped. func (engine *DockerTaskEngine) checkTearDownPauseContainer(task *apitask.Task) { if !task.IsNetworkModeAWSVPC() { return } for _, container := range task.Containers { // Cleanup the pause container network namespace before stop the container if container.Type == apicontainer.ContainerCNIPause { // Clean up if the pause container has stopped or will stop if container.KnownTerminal() || container.DesiredTerminal() { err := engine.cleanupPauseContainerNetwork(task, container) if err != nil { seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err) } } return } } } // cleanupPauseContainerNetwork will clean up the network namespace of pause container func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error { // This operation is idempotent if container.IsContainerTornDown() { return nil } delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second if engine.handleDelay != nil && delay > 0 { seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay) engine.handleDelay(delay) } containerInspectOutput, err := engine.inspectContainer(task, container) if err != nil { return errors.Wrap(err, "engine: cannot cleanup task network namespace due to error inspecting pause container") } seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false) if err != nil { return errors.Wrapf(err, "engine: failed cleanup task network namespace, task: %s", task.String()) } err = engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout) if err != nil { return err } container.SetContainerTornDown(true) seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn) return nil } // buildCNIConfigFromTaskContainer builds a CNI config for the task and container. 
func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer( task *apitask.Task, containerInspectOutput *types.ContainerJSON, includeIPAMConfig bool) (*ecscni.Config, error) { cniConfig := &ecscni.Config{ BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata.Enabled(), MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, PrimaryIPv4VPCCIDR: engine.cfg.PrimaryIPv4VPCCIDR, } if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 { cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address } if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 { cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes } cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid) cniConfig.ContainerID = containerInspectOutput.ID cniConfig.ContainerNetNS = "" // For pause containers, NetNS would be none // For other containers, NetNS would be of format container:<pause_container_ID> if containerInspectOutput.HostConfig.NetworkMode.IsNone() { cniConfig.ContainerNetNS = containerInspectOutput.HostConfig.NetworkMode.NetworkName() } else if containerInspectOutput.HostConfig.NetworkMode.IsContainer() { cniConfig.ContainerNetNS = fmt.Sprintf("container:%s", containerInspectOutput.HostConfig.NetworkMode.ConnectedContainer()) } else { return nil, errors.New("engine: failed to build cni configuration from the task due to invalid container network namespace") } cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig) if err != nil { return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task") } return cniConfig, nil } func (engine *DockerTaskEngine) inspectContainer(task *apitask.Task, container *apicontainer.Container) (*types.ContainerJSON, error) { dockerID, err := engine.getDockerID(task, container) if err != nil { return nil, err } return engine.client.InspectContainer(engine.ctx, dockerID, dockerclient.InspectContainerTimeout) } func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name) dockerID, err := engine.getDockerID(task, container) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStopContainerError{ FromError: err, }, } } // Cleanup the pause container network namespace before stop the container if container.Type == apicontainer.ContainerCNIPause { err := engine.cleanupPauseContainerNetwork(task, container) if err != nil { seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err) } } apiTimeoutStopContainer := container.GetStopTimeout() if apiTimeoutStopContainer <= 0 { apiTimeoutStopContainer = engine.cfg.DockerStopTimeout } return engine.client.StopContainer(engine.ctx, dockerID, apiTimeoutStopContainer) } func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error { seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name) dockerID, err := engine.getDockerID(task, container) if err != nil { return err } return engine.client.RemoveContainer(engine.ctx, dockerID, dockerclient.RemoveContainerTimeout) } // updateTaskUnsafe determines if a new transition needs to be applied to the // referenced task, and if needed applies it. 
It should not be called anywhere // but from 'AddTask' and is protected by the tasksLock lock there. func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) { managedTask, ok := engine.managedTasks[task.Arn] if !ok { seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.", task.Arn) return } // Keep the lock because sequence numbers cannot be correct unless they are // also read in the order addtask was called // This does block the engine's ability to ingest any new events (including // stops for past tasks, ack!), but this is necessary for correctness updateDesiredStatus := update.GetDesiredStatus() seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) managedTask.emitACSTransition(acsTransition{ desiredStatus: updateDesiredStatus, seqnum: update.StopSequenceNumber, }) seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) } // transitionContainer calls applyContainerState, and then notifies the managed // task of the change. transitionContainer is called by progressTask and // by handleStoppedToRunningContainerTransition. func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to apicontainerstatus.ContainerStatus) { // Let docker events operate async so that we can continue to handle ACS / other requests // This is safe because 'applyContainerState' will not mutate the task metadata := engine.applyContainerState(task, container, to) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: to, DockerContainerMetadata: metadata, }, }) } } // applyContainerState moves the container to the given state by calling the // function defined in the transitionFunctionMap for the state func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState apicontainerstatus.ContainerStatus) dockerapi.DockerContainerMetadata { transitionFunction, ok := engine.transitionFunctionMap()[nextState] if !ok { seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s", task.Arn, container.Name, nextState.String()) return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}} } metadata := transitionFunction(task, container) if metadata.Error != nil { seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v", task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error) } else { seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]", task.Arn, container.Name, container.GetRuntimeID(), nextState.String()) } return metadata } // transitionFunctionMap provides the logic for the simple state machine of the // DockerTaskEngine. Each desired state maps to a function that can be called // to try and move the task to that desired state. 
func (engine *DockerTaskEngine) transitionFunctionMap() map[apicontainerstatus.ContainerStatus]transitionApplyFunc { return engine.containerStatusToTransitionFunction } type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata) // State is a function primarily meant for testing usage; it is explicitly not // part of the TaskEngine interface and should not be relied upon. // It returns an internal representation of the state of this DockerTaskEngine. func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState { return engine.state } // Version returns the underlying docker version. func (engine *DockerTaskEngine) Version() (string, error) { return engine.client.Version(engine.ctx, dockerclient.VersionTimeout) } func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) { err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name) if err != nil { seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, cont.Container.Name, err) } else { cont.Container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, cont.Container.Name) } } func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) { if networkSettings == nil { return "", false } else if networkSettings.IPAddress != "" { return networkSettings.IPAddress, true } else if len(networkSettings.Networks) > 0 { for mode, network := range networkSettings.Networks { if mode == apitask.BridgeNetworkMode && network.IPAddress != "" { return network.IPAddress, true } } } return "", false } func (engine *DockerTaskEngine) getDockerID(task *apitask.Task, container *apicontainer.Container) (string, error) { runtimeID := container.GetRuntimeID() if runtimeID != "" { return runtimeID, nil } containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { return "", errors.Errorf("container name=%s belongs to unrecognized task taskArn=%s", container.Name, task.Arn) } dockerContainer, ok := containerMap[container.Name] if !ok { return "", errors.Errorf("container name=%s not recognized by agent", container.Name) } if dockerContainer.DockerID == "" { return dockerContainer.DockerName, nil } return dockerContainer.DockerID, nil }
1
25944
Feels like this needs to be invoked in `PostUnmarshalTask` since that's where most of the task setup happens.
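To make the suggestion concrete, below is a minimal sketch of what invoking this kind of setup from `PostUnmarshalTask` might look like. It is illustrative only: `PostUnmarshalTask` is the agent's task-level setup hook named in the comment, but its real parameters are elided here, and `initializeContainerSetup` is a hypothetical placeholder for the change under review, not an existing agent function.

package main

// Minimal placeholder types so the sketch compiles on its own; in the agent
// these correspond to apitask.Task and apicontainer.Container.
type Container struct{ Name string }

type Task struct {
	Arn        string
	Containers []*Container
}

// Simplified stand-in for the agent's PostUnmarshalTask (real parameters
// elided). The point being illustrated: run the per-container setup step
// here, alongside the rest of the task setup, instead of in createContainer.
func (task *Task) PostUnmarshalTask() error {
	// ... existing task setup: volumes, ENI config, firelens resources, etc. ...

	// Hypothetical placement of the setup step discussed in the review.
	for _, container := range task.Containers {
		if err := task.initializeContainerSetup(container); err != nil {
			return err
		}
	}
	return nil
}

// initializeContainerSetup is a hypothetical helper standing in for whatever
// the patch under review introduces; it is not an existing agent function.
func (task *Task) initializeContainerSetup(container *Container) error {
	_ = container // per-container initialization would happen here
	return nil
}

func main() {
	task := &Task{Arn: "arn:aws:ecs:example", Containers: []*Container{{Name: "app"}}}
	if err := task.PostUnmarshalTask(); err != nil {
		panic(err)
	}
}

One advantage of this placement, as the comment implies, is that the work runs once during task setup rather than being repeated for each createContainer call.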
aws-amazon-ecs-agent
go
@@ -5390,7 +5390,8 @@ PrivStatus ObjectPrivsMDTable::updateWhere(const std::string & setClause, // <catalogName> "<schema_name>"."<object_name>", // object_type, // object_owner, -- granteeID -// (select auth_db_name from AUTHS where auth_id = object_owner), --granteeName +// coalesce((select auth_db_name from AUTHS where auth_id = object_owner), +// 'DB__ROOT') --granteeName // USER_GRANTEE_LIT, -- "U" // SYSTEM_AUTH_ID, -- system grantor ID (-2) // SYSTEM_AUTH_NAME, -- grantorName (_SYSTEM)
1
//***************************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ //***************************************************************************** #include "PrivMgrPrivileges.h" #include "PrivMgrMD.h" #include "PrivMgrMDTable.h" #include "PrivMgrDesc.h" #include "PrivMgrDefs.h" #include "PrivMgrRoles.h" #include "PrivMgrComponentPrivileges.h" #include "PrivMgrObjects.h" #include "PrivMgrCommands.h" #include <numeric> #include <cstdio> #include <algorithm> #include <iterator> #include "sqlcli.h" #include "ComSmallDefs.h" #include "ExExeUtilCli.h" #include "ComDiags.h" #include "ComQueue.h" #include "CmpCommon.h" #include "CmpContext.h" #include "CmpDDLCatErrorCodes.h" #include "ComSecurityKey.h" #include "NAUserId.h" #include "ComUser.h" #include "CmpSeabaseDDLutil.h" #include "logmxevent_traf.h" class ColPrivGrant; class ColumnPrivsMDTable; // **************************************************************************** // File: PrivMgrPrivileges.h // // This file contains: // class ObjectPrivsMDRow // class ObjectPrivsMDTable // non inline methods for class PrivMgrPrivileges // **************************************************************************** // ***************************************************************************** // * Class: ObjectPrivsMDRow // * Description: This class represents a row from the OBJECT_PRIVILEGES table // * // * An object row can be uniquely identified by its object UID, granteeID // * and grantorID. 
// ***************************************************************************** class ObjectPrivsMDRow : public PrivMgrMDRow { public: // ------------------------------------------------------------------- // Constructors and destructors: // ------------------------------------------------------------------- ObjectPrivsMDRow() : PrivMgrMDRow(PRIVMGR_OBJECT_PRIVILEGES, OBJECT_PRIVILEGES_ENUM), objectUID_(0), grantorID_(0), granteeID_(0) { }; ObjectPrivsMDRow(const ObjectPrivsMDRow &other) : PrivMgrMDRow(other) { objectUID_ = other.objectUID_; objectName_ = other.objectName_; objectType_ = other.objectType_; granteeID_ = other.granteeID_; granteeName_ = other.granteeName_; granteeType_ = other.granteeType_; grantorID_ = other.grantorID_; grantorName_ = other.grantorName_; grantorType_ = other.grantorType_; privsBitmap_ = other.privsBitmap_; grantableBitmap_ = other.grantableBitmap_; current_ = other.current_; visited_ = other.visited_; }; virtual ~ObjectPrivsMDRow() {}; // Methods used to determine changes after processing revoked privileges PrivMgrCoreDesc& accessCurrent() { return current_; } PrivMgrCoreDesc& accessVisited() { return visited_; } void clearVisited() { visited_.setAllPrivAndWgo(false); }; bool isChanged() { return (current_ == PrivMgrCoreDesc(privsBitmap_, grantableBitmap_)); } void setToOriginal() { current_ = PrivMgrCoreDesc(privsBitmap_, grantableBitmap_); } ; // Return True iff some current flag is set, where visited is not. NABoolean anyNotVisited() const {return current_.anyNotSet( visited_ );} // Clear current where current was set and visited was not. // Return True iff some current flag gets cleared. NABoolean cascadeLosses(); // Describe a row for tracing void describeRow (std::string &rowDetails); // ------------------------------------------------------------------- // Data Members: // ------------------------------------------------------------------- int64_t objectUID_; std::string objectName_; ComObjectType objectType_; int32_t granteeID_; std::string granteeName_; std::string granteeType_; int32_t grantorID_; std::string grantorName_; std::string grantorType_; PrivObjectBitmap privsBitmap_; PrivObjectBitmap grantableBitmap_; PrivMgrCoreDesc visited_; PrivMgrCoreDesc current_; }; // ***************************************************************************** // * Class: ObjectPrivsMDTable // * Description: This class represents the OBJECT_PRIVILEGES table // * // * An object privileges row can be uniquely identified by: // * objectUID // * granteeID // * grantorID // ***************************************************************************** class ObjectPrivsMDTable : public PrivMgrMDTable { public: ObjectPrivsMDTable( const std::string & tableName, ComDiagsArea * pDiags = NULL) : PrivMgrMDTable(tableName,OBJECT_PRIVILEGES_ENUM, pDiags) {}; virtual ~ObjectPrivsMDTable() {}; virtual PrivStatus insert(const PrivMgrMDRow &row); virtual PrivStatus selectWhereUnique( const std::string & whereClause, PrivMgrMDRow & row); PrivStatus selectWhere( const std::string & whereClause, const std::string & orderByClause, std::vector<PrivMgrMDRow *> &rowList); PrivStatus deleteRow(const ObjectPrivsMDRow & row); virtual PrivStatus deleteWhere(const std::string & whereClause); PrivStatus updateRow(const ObjectPrivsMDRow & row); PrivStatus updateWhere( const std::string & setClause, const std::string & whereClause); PrivStatus insertSelect( const std::string & objectsLocation, const std::string & authsLocation); PrivStatus insertSelectOnAuthsToPublic( const std::string 
&objectsLocation, const std::string &authsLocation); private: ObjectPrivsMDTable(); void setRow(OutputInfo *pCliRow, ObjectPrivsMDRow &rowOut); }; // ***************************************************************************** // * Class: ColumnPrivsMDRow // * Description: This class represents a row from the COLUMN_PRIVILEGES table // * // * An column row can be uniquely identified by its object UID, granteeID, // * grantorID, and column ordinal. // ***************************************************************************** class ColumnPrivsMDRow : public PrivMgrMDRow { public: // ------------------------------------------------------------------- // Constructors and destructors: // ------------------------------------------------------------------- ColumnPrivsMDRow() : PrivMgrMDRow(PRIVMGR_COLUMN_PRIVILEGES, COLUMN_PRIVILEGES_ENUM), objectUID_(0), grantorID_(0), granteeID_(0), columnOrdinal_(0) { }; ColumnPrivsMDRow(const ColumnPrivsMDRow &other) : PrivMgrMDRow(other) { objectUID_ = other.objectUID_; objectName_ = other.objectName_; granteeID_ = other.granteeID_; granteeName_ = other.granteeName_; grantorID_ = other.grantorID_; grantorName_ = other.grantorName_; columnOrdinal_ = other.columnOrdinal_; privsBitmap_ = other.privsBitmap_; grantableBitmap_ = other.grantableBitmap_; }; virtual ~ColumnPrivsMDRow() {}; // Describe a row for tracing void describeRow (std::string &rowDetails); // ------------------------------------------------------------------- // Data Members: // ------------------------------------------------------------------- int64_t objectUID_; std::string objectName_; int32_t granteeID_; std::string granteeName_; int32_t grantorID_; std::string grantorName_; int32_t columnOrdinal_; PrivColumnBitmap privsBitmap_; PrivColumnBitmap grantableBitmap_; }; // ***************************************************************************** // * Class: ColumnPrivsMDTable // * Description: This class represents the COLUMN_PRIVILEGES table // * // * An column privileges row can be uniquely identified by: // * objectUID // * granteeID // * grantorID // * columnOrdinal // ***************************************************************************** class ColumnPrivsMDTable : public PrivMgrMDTable { public: ColumnPrivsMDTable( const std::string & tableName, ComDiagsArea * pDiags = NULL) : PrivMgrMDTable(tableName,COLUMN_PRIVILEGES_ENUM, pDiags) {}; virtual ~ColumnPrivsMDTable() {}; virtual PrivStatus insert(const PrivMgrMDRow &row); virtual PrivStatus selectWhereUnique( const std::string & whereClause, PrivMgrMDRow & row); PrivStatus selectWhere( const std::string & whereClause, const std::string & orderByClause, std::vector<PrivMgrMDRow *> &rowList); PrivStatus updateColumnRow( const ColumnPrivsMDRow & row, const std::string whereBase); private: ColumnPrivsMDTable(); void setRow( OutputInfo *pCliRow, ColumnPrivsMDRow &rowOut); }; class ColPrivEntry { public: int32_t columnOrdinal; PrivColumnBitmap privsBitmap; PrivColumnBitmap grantableBitmap; bool isUpdate; ColPrivEntry() : columnOrdinal(0),isUpdate(false){}; }; class ColObjectGrants { public: ColObjectGrants(int64_t objectUID) : object_uid_(objectUID) {}; ~ColObjectGrants(); const ColPrivEntry * getColPrivGrant(int32_t columnOrdinal) const { for (size_t i = 0; i < colPrivGrants_.size(); i++) if (colPrivGrants_[i].columnOrdinal == columnOrdinal) return &colPrivGrants_[i]; return NULL; } void clear() { colPrivGrants_.clear(); } private: int64_t object_uid_; std::vector<ColPrivEntry> colPrivGrants_; }; // 
***************************************************************************** // * PrivMgrPrivileges.cpp static function declarations * // ***************************************************************************** static PrivStatus buildPrivText( const std::vector<PrivMgrMDRow *> rowList, const PrivMgrObjectInfo & objectInfo, PrivLevel privLevel, ComDiagsArea * pDiags_, std::string & privilegeText); void static buildGrantText( const std::string & privText, const std::string & objectGranteeText, const int32_t grantorID, const std::string grantorName, bool isWGO, const int32_t ownerID, std::string & grantText); static PrivStatus buildColumnSecurityKeys( const int64_t objectUID, const PrivColList & colPrivsList, const int32_t userID, std::vector<ComSecurityKey *> & secKeySet); static PrivStatus buildUserSecurityKeys( const std::vector<int32_t> & roleIDs, const int32_t userID, std::vector <ComSecurityKey *> & secKeySet); void static closeColumnList(std::string & columnList); static void deleteRowList(std::vector<PrivMgrMDRow *> & rowList); static ColPrivEntry * findColumnEntry( std::vector<ColPrivEntry> & colPrivsToGrant, const int32_t columnsOrdinal); static PrivStatus getColRowsForGrantee( const std::vector <PrivMgrMDRow *> &columnRowList, const int32_t granteeID, const std::vector<int32_t> & roleIDs, std::vector<ColumnPrivsMDRow> &rowList, std::vector <ComSecurityKey *>* secKeySet); static void getColRowsForGranteeGrantor( const std::vector <PrivMgrMDRow *> & columnRowList, const int32_t granteeID, const int32_t grantorID, std::vector<ColPrivEntry> &colPrivGrants); static bool hasAllDMLPrivs( ComObjectType objectType, PrivObjectBitmap privBitmap); static bool hasGrantedColumnPriv( const std::vector <PrivMgrMDRow *> & columnRowList, int32_t grantorID, int32_t granteeID, const std::vector<ColPrivSpec> & colPrivsArray, PrivStatus & privStatus, std::string & privilege, std::vector<ColPrivEntry> & grantedColPrivs); static bool isDelimited( const std::string &identifier); // ***************************************************************************** // PrivMgrPrivileges methods // ***************************************************************************** // ----------------------------------------------------------------------- // Default Constructor // ----------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges () : PrivMgr(), objectUID_(0), grantorID_(0) { objectTableName_ = metadataLocation_ + "." + PRIVMGR_OBJECT_PRIVILEGES; columnTableName_ = metadataLocation_ + "." + PRIVMGR_COLUMN_PRIVILEGES; } // ----------------------------------------------------------------------- // Construct a PrivMgrPrivileges object for a new object privilege. // ----------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges ( const int64_t objectUID, const std::string &objectName, const int32_t grantorID, const std::string &metadataLocation, ComDiagsArea * pDiags) : PrivMgr(metadataLocation,pDiags), objectUID_(objectUID), objectName_(objectName), grantorID_(grantorID) { objectTableName_ = metadataLocation + "." + PRIVMGR_OBJECT_PRIVILEGES; columnTableName_ = metadataLocation + "." 
+ PRIVMGR_COLUMN_PRIVILEGES; } // ---------------------------------------------------------------------------- // Construct PrivMgrPrivileges object for describe statements // ---------------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges ( const PrivMgrObjectInfo &objectInfo, const std::string &metadataLocation, ComDiagsArea *pDiags) : PrivMgr(metadataLocation, pDiags), objectUID_(((PrivMgrObjectInfo)objectInfo).getObjectUID()), objectName_(((PrivMgrObjectInfo)objectInfo).getObjectName()), grantorID_(0) { objectTableName_ = metadataLocation + "." + PRIVMGR_OBJECT_PRIVILEGES; columnTableName_ = metadataLocation + "." + PRIVMGR_COLUMN_PRIVILEGES; } // ---------------------------------------------------------------------------- // Construct a PrivMgrPrivileges object for an objectUID // ---------------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges ( const int64_t objectUID, const std::string &metadataLocation, ComDiagsArea *pDiags) : PrivMgr(metadataLocation, pDiags), objectUID_(objectUID), grantorID_(0) { objectTableName_ = metadataLocation + "." + PRIVMGR_OBJECT_PRIVILEGES; columnTableName_ = metadataLocation + "." + PRIVMGR_COLUMN_PRIVILEGES; } // ---------------------------------------------------------------------------- // Construct a basic PrivMgrPrivileges object // ---------------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges ( const std::string &metadataLocation, ComDiagsArea *pDiags) : PrivMgr(metadataLocation, pDiags), objectUID_(0), grantorID_(0) { objectTableName_ = metadataLocation + "." + PRIVMGR_OBJECT_PRIVILEGES; columnTableName_ = metadataLocation + "." + PRIVMGR_COLUMN_PRIVILEGES; } // ----------------------------------------------------------------------- // Copy constructor // ----------------------------------------------------------------------- PrivMgrPrivileges::PrivMgrPrivileges(const PrivMgrPrivileges &other) : PrivMgr(other) { objectUID_ = other.objectUID_; objectName_ = other.objectName_; grantorID_ = other.grantorID_; objectTableName_ = other.objectTableName_; columnTableName_ = other.columnTableName_; objectRowList_ = other.objectRowList_; columnRowList_ = other.columnRowList_; } // ----------------------------------------------------------------------- // Destructor. // ----------------------------------------------------------------------- PrivMgrPrivileges::~PrivMgrPrivileges() { deleteRowList(objectRowList_); deleteRowList(columnRowList_); } // ***************************************************************************** // * Method: buildSecurityKeys // * // * Builds security keys for the current object and specified user. // * // * Parameters: // * // * <granteeID> is the unique identifier for the grantee // * <privs> is the list of privileges the user has on the object // * <secKeySet> is the set of security keys to be passed back. Caller is // * responsible for freeing keys. // * // * Returns: PrivStatus // * // * STATUS_GOOD: Security keys were built // * *: Security keys were not built, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::buildSecurityKeys( const int32_t granteeID, const PrivMgrCoreDesc &privs, std::vector <ComSecurityKey *> & secKeySet) { // Only need to generate keys for DML privileges for ( size_t i = FIRST_DML_PRIV; i <= LAST_DML_PRIV; i++ ) { if ( privs.getPriv(PrivType(i))) { ComSecurityKey *key = new ComSecurityKey(granteeID, objectUID_, PrivType(i), ComSecurityKey::OBJECT_IS_OBJECT); if (key->isValid()) secKeySet.push_back(key); else { PRIVMGR_INTERNAL_ERROR("ComSecurityKey is null"); return STATUS_ERROR; } } } return STATUS_GOOD; } // ***************************************************************************** // * Method: getColPrivsForUser // * // * Returns the column privileges a user has been granted on the object. // * // * Parameters: // * // * <granteeID> is the unique identifier for the grantee // * <roleIDs> specifies a list of roles granted to the grantee // * <colPrivsList> passes back the list of privs granted // * <colGrantableList> passes back the list the user has WGO for // * <secKeySet> if not NULL, returns a set of keys for user // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were returned // * *: Unable to lookup privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getColPrivsForUser( const int32_t granteeID, const std::vector<int32_t> & roleIDs, PrivColList & colPrivsList, PrivColList & colGrantableList, std::vector <ComSecurityKey *>* secKeySet) { std::vector<ColumnPrivsMDRow> rowList; // Get the privileges for the columns of the object granted to the grantee PrivStatus privStatus = getColRowsForGrantee(columnRowList_,granteeID,roleIDs, rowList,secKeySet); if (privStatus == STATUS_ERROR) return privStatus; for (int32_t i = 0; i < rowList.size();++i) { const int32_t columnOrdinal = rowList[i].columnOrdinal_; colPrivsList[columnOrdinal] = rowList[i].privsBitmap_; colGrantableList[columnOrdinal] = rowList[i].grantableBitmap_; if (secKeySet != NULL) { privStatus = buildColumnSecurityKeys(objectUID_,colPrivsList, rowList[i].granteeID_,*secKeySet); if (privStatus != STATUS_GOOD) return privStatus; } } return STATUS_GOOD; } // ***************************************************************************** // * Method: getPrivRowsForObject // * // * returns rows describing all the privileges that have been // * granted on the object // * // * Parameters: // * // * <objectPrivsRows> Zero or more rows of grants for the object. // * // * Returns: PrivStatus // * // * STATUS_GOOD : Rows were returned // * STATUS_NOTFOUND: No rows were returned // * *: Unable to read privileges, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getPrivRowsForObject( const int64_t objectUID, std::vector<ObjectPrivsRow> & objectPrivsRows) { PrivStatus retcode = STATUS_GOOD; if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for getPrivRowsForObject()"); return STATUS_ERROR; } // generate the list of privileges granted to the object and store in class if (generateObjectRowList() == STATUS_ERROR) return STATUS_ERROR; for (size_t i = 0; i < objectRowList_.size(); i++) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*objectRowList_[i]); if (row.grantorID_ != SYSTEM_AUTH_ID) { ObjectPrivsRow newRow; strcpy(newRow.objectName,row.objectName_.c_str()); newRow.objectType = row.objectType_; newRow.granteeID = row.granteeID_; strcpy(newRow.granteeName,row.granteeName_.c_str()); newRow.granteeType = CmGetComGranteeAsGranteeType(row.granteeType_.c_str()); newRow.grantorID = row.grantorID_; strcpy(newRow.grantorName,row.grantorName_.c_str()); newRow.grantorType = CmGetComGrantorAsGrantorType(row.grantorType_.c_str()); newRow.privilegesBitmap = row.privsBitmap_.to_ulong(); newRow.grantableBitmap = row.grantableBitmap_.to_ulong(); objectPrivsRows.push_back(newRow); } } return retcode; } // ---------------------------------------------------------------------------- // method: getTreeOfGrantors // // Returns the list of grantors that have granted privileges to the grantee, // either directly or through another grantor in the tree. // // The list is determined by first looking at the direct grantors. For each // returned grantor, this function is called recursively to get the previous set // of grantors until there are no more grantors. // // The list is returned in grantor ID order. // // For example: // user1 (owner) grants to: // user6 who grants to: // user3 // user4 who grants to: // user5 // user2 // user3 who grants to: // user4 // user5 // The following grantors are returned for granteeID user4: // user1, user3, user6 // // Params: // granteeID - where to start the search // listOfGrantors - returns the list of grantors // ---------------------------------------------------------------------------- void PrivMgrPrivileges::getTreeOfGrantors( const int32_t granteeID, std::set<int32_t> &listOfGrantors) { // search the rowList for a match for (size_t i = 0; i < objectRowList_.size(); i++) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*objectRowList_[i]); if (row.granteeID_ == granteeID) { // We found a grant to the granteeID // go up to the next level using the grantorID getTreeOfGrantors( row.grantorID_, listOfGrantors); listOfGrantors.insert(granteeID); } } } // ***************************************************************************** // * Method: givePrivForObjects // * // * Updates one or more rows in the OBJECT_PRIVILEGES table to reflect // * a new owner of one or more objects. // * // * Parameters: // * // * <currentOwnerID> is the unique identifier for the current owner // * <newOwnerID> is the unique identifier for the new owner // * <newOwnerName> is the name of the new owner (upper cased) // * <objectUIDs> is the list of objects with a new owner // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were updated to reflect new owner // * *: Unable to update privileges, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::givePrivForObjects( const int32_t currentOwnerID, const int32_t newOwnerID, const std::string &newOwnerName, const std::vector<int64_t> &objectUIDs) { PrivStatus privStatus = STATUS_GOOD; for (size_t i = 0; i < objectUIDs.size(); i++) { privStatus = givePriv(currentOwnerID,newOwnerID,newOwnerName,objectUIDs[i]); if (privStatus != STATUS_GOOD) return privStatus; } return STATUS_GOOD; } // ***************************************************************************** // * Method: givePriv // * // * Updates rows in the OBJECT_PRIVILEGES table to reflect a new owner of // * an objects. // * // * Parameters: // * // * <granteeID> is the unique identifier for the new owner // * <granteeName> is the name of the new owner (upper cased) // * <objectUIDs> is the list of objects with a new owner // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were updated to reflect new owner // * *: Unable to update privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::givePriv( const int32_t currentOwnerID, const int32_t newOwnerID, const std::string &newOwnerName, const int64_t objectUID) { // ***************************************************************************** // * * // * The set of grants for a given object can be thought of as a tree with * // * many branches and one root. At the root, is the grant from the system * // * (grantor is _SYSTEM) to the owner of the object. The grant from the * // * system is for all privileges, with grant option. * // * * // * The owner then grants privileges to one or more authIDs. Each of * // * these grants can be viewed as a branch off the system grant root. With * // * WITH GRANT OPTION, each of these branches can have its own set of * // * branches, potentially resulting in a dense tree. See the rough * // * drawing: * // * * // * USERG USERC USERD USERC USERB * // * \ | / \ | / * // * \ | / \ | / * // * \ | / \ | / * // * USERE USERD USERF USERB USERG * // * \ | / \ | / * // * \ | / \ | / * // * \ | / \ | / * // * USERB USERC USERD * // * \ | / * // * \ | / * // * \ | / * // * USERA * // * | * // * _SYSTEM * // * * // * * // * Some of the rules are: * // * 1) No grants to the owner (USERA) * // * 2) A user can appear an unlimited number of times in the tree, but * // * all grants from the user (for a given privilege) are from the same * // * node. For example, USERB is granted 3 times, but all grants from * // * USERB emanate from the same node. This is because when USERB grants * // * a privilege, we start from the root and find the first node where * // * USERB has WITH GRANT OPTION. * // * * // * When an object is given to another user, the current owner loses * // * their grant from the system; instead, the new owner is granted from the * // * system. All existing grants from the current owner are now from the new * // * owner. Based on rule #1 above, the new owner can no longer appear in * // * other nodes in the tree except the root node. 
So, in the case where the * // * object is given to USERB, the new grant tree should be: * // * * // * USERD USERC * // * | \ | * // * | \ | * // * USERC | \ | * // * \ | USERF USERG * // * \ | | \ / * // * \ | | \ / * // * \ | | \ / * // * USERG ---------USERE | USERD * // * \ | / * // * \ | / * // * \ | / * // * USERB ---- USERC * // * | * // * _SYSTEM * // * * // * * // * Note, previously USERA had three grants, and USERB had three grants, * // * and now USERB has four grants--not six. First, the grant from USERA to * // * USERB is removed. USERB is the grantee on only one node, from the system * // * Second, USERA and USERB both had a grant to USERD; these grants need to * // * be combined. * // * * // * Essentially, the subtree (or branch) of USERB has been grafted into * // * the root, and all USERB leaf nodes have been removed. Any duplicate * // * nodes, where USERA and USERB were grantors to the same grantee, have * // * been merged. * // * * // * The algorithm is therefore three steps: * // * * // * 1) Delete any leaf nodes where the new owner is the grantee. * // * 2) Get the list of nodes where the original or the new owner is the * // * grantor. Merge any duplicates (update bitmaps), and update to new * // * owner for old nodes. * // * 3) Update grantee on system grant to new owner. * // * * // ***************************************************************************** // // Delete the leaf nodes: // // DELETE FROM OBJECT_PRIVIELGES // WHERE OBJECT_UID = objectUID and GRANTEE_ID = granteeID // PrivStatus privStatus = STATUS_GOOD; ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); std::vector<PrivMgrMDRow *> rowList; char buf[1000]; sprintf(buf,"WHERE object_uid = %ld AND grantee_ID = %d", objectUID,newOwnerID); privStatus = objectPrivsTable.deleteWhere(buf); if (privStatus != STATUS_GOOD) return privStatus; sprintf(buf, "WHERE object_uid = %ld AND (grantor_ID = %d OR grantor_ID = %d) ", objectUID,newOwnerID,currentOwnerID); std::string orderByClause (" ORDER BY grantee_ID "); privStatus = objectPrivsTable.selectWhere(buf, orderByClause ,rowList); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } for (size_t i = rowList.size(); i > 0; i--) { ObjectPrivsMDRow &currentRow = static_cast<ObjectPrivsMDRow &>(*rowList[i - 1]); if (i == 1 && currentRow.grantorID_ == currentOwnerID) { currentRow.grantorID_ = newOwnerID; currentRow.grantorName_ = newOwnerName; privStatus = objectPrivsTable.updateRow(currentRow); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } continue; } ObjectPrivsMDRow &previousRow = static_cast<ObjectPrivsMDRow &>(*rowList[i - 1]); // If both granted to the same ID, merge the rows, delete one, // and update the other. if (currentRow.granteeID_ == previousRow.granteeID_) { previousRow.privsBitmap_ |= currentRow.privsBitmap_; previousRow.grantableBitmap_ |= currentRow.grantableBitmap_; previousRow.grantorID_ = newOwnerID; previousRow.grantorName_ = newOwnerName; privStatus = objectPrivsTable.deleteRow(currentRow); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } privStatus = objectPrivsTable.updateRow(previousRow); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } i--; continue; } // If this is a grant from the old owner, update to the new owner. 
if (currentRow.grantorID_ == currentOwnerID) { currentRow.grantorID_ = newOwnerID; currentRow.grantorName_ = newOwnerName; privStatus = objectPrivsTable.updateRow(currentRow); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } continue; } // Grant from the new owner. Will automatically be grafted into the // tree in the next step. } deleteRowList(rowList); // Update the root node. char setClause[1000]; char whereClause[1000]; sprintf(setClause," SET GRANTEE_ID = %d, GRANTEE_NAME = '%s' ", newOwnerID,newOwnerName.c_str()); sprintf(whereClause," WHERE GRANTOR_ID = %d ",SYSTEM_AUTH_ID); privStatus = objectPrivsTable.updateWhere(setClause,whereClause); if (privStatus != STATUS_GOOD) return privStatus; return STATUS_GOOD; } // ***************************************************************************** // * Method: grantColumnPrivileges // * // * Adds or updates a row in the COLUMN_PRIVILEGES table. // * // * Parameters: // * // * <objectType> is the type of the subject object. // * <granteeID> is the unique identifier for the grantee // * <granteeName> is the name of the grantee (upper cased) // * <grantorName> is the name of the grantor (upper cased) // * <colPrivsArray> is the list of columns and privileges to grant // * <isWGOSpecified> is true then also allow the grantee to grant the set // * of privileges to other grantees // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were granted // * *: Unable to grant privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::grantColumnPrivileges( const ComObjectType objectType, const int32_t granteeID, const std::string &granteeName, const std::string &grantorName, const std::vector<ColPrivSpec> & colPrivsArrayIn, const bool isWGOSpecified) { PrivStatus privStatus = STATUS_GOOD; std::vector<ColPrivSpec> &colPrivsArray = const_cast<std::vector<ColPrivSpec> &>(colPrivsArrayIn); log (__FILE__, "checking column privileges", -1); // generate the list of column privileges granted to the object and store in // class (columnRowList_) if (generateColumnRowList() == STATUS_ERROR) return STATUS_ERROR; // get roleIDs for the grantor std::vector<int_32> roleIDs; privStatus = getRoleIDsForUserID(grantorID_,roleIDs); if (privStatus == STATUS_ERROR) return privStatus; // Determine if the grantor has WITH GRANT OPTION (WGO) for all the // columns to be granted. If not, return an error. ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); ColumnPrivsMDTable columnPrivsTable(columnTableName_,pDiags_); // Grantor may have WGO from two sources, object-level grants on the object, // and column-level grants. First check the object-level grants. If there // are privileges still to grant, check for requisite column-level grants. std::vector<ColPrivEntry> grantedColPrivs; if (!hasColumnWGO(colPrivsArrayIn,roleIDs,privStatus)) { if (privStatus == STATUS_NOTFOUND) *pDiags_ << DgSqlCode(-CAT_PRIVILEGE_NOT_GRANTED); else PRIVMGR_INTERNAL_ERROR("Cannot fetch privileges"); return STATUS_ERROR; } // Grantor has authority to grant all privileges requested. See if some of // the grants are already present. (may be adding WGO) // Get existing column grants from grantor to the specified grantee. getColRowsForGranteeGrantor(columnRowList_, granteeID,grantorID_, grantedColPrivs); // Merge the column-privilege-to-grant entries (colPrivArray) into one entry // per column ordinal. 
// // Example: Given a commands such as // // GRANT SELECT(COL4),INSERT(COL2,COL4) ON TAB TO USER; // // three entries are generated by the parser, but only two rows are written; // one for column 2 (insert) and one for column 4 (insert and select). // // Input may have same column ordinal in multiple entries, but the input is // guaranteed not to contain same ordinal and privType more than once. std::vector<ColPrivEntry> colPrivsToGrant; for (size_t i = 0; i < colPrivsArray.size(); i++) { const ColPrivSpec &colPrivEntry = colPrivsArray[i]; ColPrivEntry *existingEntry = findColumnEntry(colPrivsToGrant, colPrivEntry.columnOrdinal); if (existingEntry != NULL) { existingEntry->privsBitmap.set(colPrivEntry.privType); if (isWGOSpecified) existingEntry->grantableBitmap.set(colPrivEntry.privType); } else { ColPrivEntry colPrivToGrant; colPrivToGrant.columnOrdinal = colPrivEntry.columnOrdinal; colPrivToGrant.privsBitmap.set(colPrivEntry.privType); if (isWGOSpecified) colPrivToGrant.grantableBitmap.set(colPrivEntry.privType); colPrivsToGrant.push_back(colPrivToGrant); } } // Walk the list of column privileges to grant, and either insert a new // row in the COLUMN_PRIVILEGES table or update an existing row. bool rowWritten = false; std::string whereBase(" WHERE object_uid = "); whereBase += UIDToString(objectUID_); whereBase += " AND grantor_id = "; whereBase += authIDToString(grantorID_); whereBase += " AND grantee_id = "; whereBase += authIDToString(granteeID); whereBase += " AND column_number = "; for (size_t i = 0; i < colPrivsToGrant.size(); i++) { ColPrivEntry &colPrivToGrant = colPrivsToGrant[i]; bool updateOperation = false; bool skipOperation = false; // Look for any existing granted privileges on the column for which // privileges are to be granted. for (size_t g = 0; g < grantedColPrivs.size(); g++) { const ColPrivEntry &grantedColPriv = grantedColPrivs[g]; // See if there is an existing column privilege granted for this column. // If not, check the next granted column privilege. If none are found // for this column, it is an insert operation. if (colPrivToGrant.columnOrdinal != grantedColPriv.columnOrdinal) continue; // An existing row with the same column has been found, it is one of four cases: // // 1) Adding a privilege (e.g., authID had SELECT, now granting INSERT) [update operation] // 2) AuthID had privilege, now adding WGO [update operation] // 3) AuthID already has privilege and/or WGO specified [skip operation] // 4) AuthID had privilege and WGO, now trying to take away WGO [error] // If the privilege bitmaps are not the same, adding a privilege. // This is an update operation, break out of for loop. if (colPrivToGrant.privsBitmap != grantedColPriv.privsBitmap) { updateOperation = true; // Case #1 colPrivToGrant.privsBitmap |= grantedColPriv.privsBitmap; colPrivToGrant.grantableBitmap |= grantedColPriv.grantableBitmap; break; } // Privilege bitmaps are the same, could be adding WGO. if (colPrivToGrant.grantableBitmap.any()) { // If WGO was specified, and adding, this is an update. // If user already has WGO, it is a NOP, so skip this entry. if (colPrivToGrant.grantableBitmap == grantedColPriv.grantableBitmap) { skipOperation = true; //Case #3 break; } // Adding WGO updateOperation = true; //Case #2 } else // WGO not specified { // If user already has WGO, error. Cannot revoke WGO via GRANT. 
if (grantedColPriv.grantableBitmap.any()) { *pDiags_ << DgSqlCode(-CAT_PRIVILEGE_NOT_GRANTED); //TODO: Add error for removing WGO in GRANT return STATUS_ERROR; // Case #4 } // WGO not specified, current privs same as privs to grant, // nothing to do. skipOperation = true; //Case #3 break; } updateOperation = true; colPrivToGrant.privsBitmap |= grantedColPriv.privsBitmap; colPrivToGrant.grantableBitmap |= grantedColPriv.grantableBitmap; // Found an existing row for this column ordinal, so break out of loop. break; } if (skipOperation) continue; ColumnPrivsMDRow row; row.objectUID_ = objectUID_; row.objectName_ = objectName_; row.granteeID_ = granteeID; row.granteeName_ = granteeName; row.grantorID_ = grantorID_; row.grantorName_ = grantorName; row.privsBitmap_ = colPrivToGrant.privsBitmap; row.grantableBitmap_ = colPrivToGrant.grantableBitmap; row.columnOrdinal_ = colPrivToGrant.columnOrdinal; if (updateOperation) privStatus = columnPrivsTable.updateColumnRow(row,whereBase); else privStatus = columnPrivsTable.insert(row); if (privStatus == STATUS_ERROR) return privStatus; rowWritten = true; } //TODO: Could issue a warning if no privileges were granted; means all // requested grants already exist. // if (!rowWritten) // Report Warning; return STATUS_GOOD; } //************* End of PrivMgrPrivileges::grantColumnPrivileges **************** // ***************************************************************************** // * Method: grantObjectPriv // * // * Adds or updates a row in the OBJECT_PRIVILEGES table. // * // * Parameters: // * // * <objectType> is the type of the subject object. // * <granteeID> is the unique identifier for the grantee // * <granteeName> is the name of the grantee (upper cased) // * <grantorName> is the name of the grantor (upper cased) // * <privsList> is the list of privileges to grant // * <colPrivsArray> is the list of columns and privileges to grant // * <isAllSpecified> if true then all privileges valid for the object // * type will be granted // * <isWGOSpecified> is true then also allow the grantee to grant the set // * of privileges to other grantees // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were granted // * *: Unable to grant privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::grantObjectPriv( const ComObjectType objectType, const int32_t granteeID, const std::string &granteeName, const std::string &grantorName, const std::vector<PrivType> &privsList, const std::vector<ColPrivSpec> & colPrivsArray, const bool isAllSpecified, const bool isWGOSpecified) { PrivStatus retcode = STATUS_GOOD; std::string traceMsg; log (__FILE__, "****** GRANT operation begins ******", -1); if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for grant command"); return STATUS_ERROR; } // If this grant request is called during the creation of the OBJECT_PRIVILEGES // table, just return okay. Fixes a chicken and egg problem. char theQuote = '"'; std::string nameRequested(objectName_); std::string nameToCheck(objectTableName_); // Delimited name issue. The passed in objectName may enclose name parts in // double quotes even if the name part contains only [a-z][A-Z][0-9]_ // characters. The same is true for the stored metadataLocation_. // To allow equality checks to work, we strip off the double quote delimiters // from both names. Fortunately, the double quote character is not allowed in // any SQL identifier except as delimiters - so this works. 
nameRequested.erase(std::remove(nameRequested.begin(), nameRequested.end(), theQuote), nameRequested.end()); nameToCheck.erase(std::remove(nameToCheck.begin(), nameToCheck.end(), theQuote), nameToCheck.end()); if (nameRequested == nameToCheck && grantorID_ == SYSTEM_AUTH_ID) return STATUS_GOOD; // If the granting to self or DB__ROOT, return an error if (grantorID_ == granteeID || granteeID == ComUser::getRootUserID()) { *pDiags_ << DgSqlCode(-CAT_CANT_GRANT_TO_SELF_OR_ROOT); return STATUS_ERROR; } // generate the list of privileges granted to the object and store in class if (generateObjectRowList() == STATUS_ERROR) return STATUS_ERROR; // get roleIDs for the grantor std::vector<int_32> roleIDs; retcode = getRoleIDsForUserID(grantorID_,roleIDs); if (retcode == STATUS_ERROR) return retcode; if (!colPrivsArray.empty()) { retcode = grantColumnPrivileges(objectType,granteeID,granteeName,grantorName, colPrivsArray,isWGOSpecified); if (retcode != STATUS_GOOD) return retcode; // If only column-level privileges were specified, no problem. if (privsList.empty()) { log (__FILE__, "****** GRANT operation succeeded ******", -1); return STATUS_GOOD; } } // verify the privileges list and create a desc to contain them PrivMgrDesc privsToGrant(granteeID); retcode = convertPrivsToDesc(objectType, isAllSpecified, isWGOSpecified, false, privsList, privsToGrant); if (retcode != STATUS_GOOD) return retcode; // check for circular dependency. If USERX grants to USERY WGO, then USERY // cannot grant back to USERX. Theoretically, USERX can grant select, update // to USERY and USERY can grant delete, insert to USERX but for simplicity, // we will reject the request independent on the set of privileges involved. std::set<int32_t> listOfGrantors; getTreeOfGrantors(grantorID_, listOfGrantors); // If we find the grantee in the list of grantors, return an error if (listOfGrantors.find(granteeID) != listOfGrantors.end()) { *pDiags_ << DgSqlCode(-CAT_CIRCULAR_PRIVS) << DgString0(grantorName.c_str()) << DgString1(granteeName.c_str()); return STATUS_ERROR; } // get privileges for the grantor and make sure the grantor can grant // at least one of the requested privileges // // SQL Ansi states that privileges that can be granted should be done so // even if some requested privilege are not grantable. PrivMgrDesc privsOfTheGrantor(grantorID_); bool hasManagePrivileges; retcode = getUserPrivs(objectType, grantorID_, roleIDs, privsOfTheGrantor, hasManagePrivileges, NULL ); if (retcode != STATUS_GOOD) return retcode; // If null, the grantor has no privileges if ( privsOfTheGrantor.isNull() ) { *pDiags_ << DgSqlCode (-CAT_PRIVILEGE_NOT_GRANTED); return STATUS_ERROR; } // Remove any privsToGrant which are not held GRANTABLE by the Grantor. // TBD: if not all privileges are grantable, should at least report // which ones were not granted. bool warnNotAll = false; if ( privsToGrant.limitToGrantable( privsOfTheGrantor ) ) { // limitToGrantable true ==> some specified privs were not grantable. if ( isAllSpecified ) { // This is ok. Can specify ALL without having ALL. } else warnNotAll = true; // Not all the specified privs are grantable. } // If nothing left to grant, we are done. 
if ( privsToGrant.isNull() ) { *pDiags_ << DgSqlCode (-CAT_PRIVILEGE_NOT_GRANTED); return STATUS_ERROR; } // See if grantor has previously granted privileges to the grantee bool foundRow = false; ObjectPrivsMDRow row; retcode = getGrantedPrivs(granteeID, row); if (retcode == STATUS_NOTFOUND) foundRow = false; else if (retcode == STATUS_GOOD) foundRow = true; else return retcode; // if privileges exist, set currentPrivs to existing list PrivMgrCoreDesc currentPrivs; // creates an empty descriptor if (foundRow) { PrivMgrCoreDesc tempPrivs(row.privsBitmap_, row.grantableBitmap_); currentPrivs = tempPrivs; } PrivMgrCoreDesc savedOriginalPrivs = currentPrivs; // get the list of additional privileges to grant // some privileges may have already been granted PrivMgrDesc privsToAdd = privsToGrant; PrivMgrCoreDesc::PrivResult result = privsToAdd.grantTablePrivs(currentPrivs); // nothing to grant - everything is already granted if ( result == PrivMgrCoreDesc::NONE ) return STATUS_GOOD; // Internal consistency check. We should have granted something. assert( result != PrivMgrCoreDesc::NEUTRAL ); // There is something to grant, update/insert metadata // set up row if it does not exist if (!foundRow) { row.objectUID_ = objectUID_; row.objectName_ = objectName_; row.objectType_ = objectType; row.granteeID_ = granteeID; row.granteeName_ = granteeName; row.granteeType_ = USER_GRANTEE_LIT; row.grantorID_ = grantorID_; row.grantorName_ = grantorName; row.grantorType_ = USER_GRANTOR_LIT; row.privsBitmap_ = privsToGrant.getTablePrivs().getPrivBitmap(); row.grantableBitmap_ = privsToGrant.getTablePrivs().getWgoBitmap(); } // combine privsToGrant with existing privs else { privsToGrant.unionOfPrivs(currentPrivs); row.privsBitmap_ = privsToGrant.getTablePrivs().getPrivBitmap(); row.grantableBitmap_ = privsToGrant.getTablePrivs().getWgoBitmap(); } ObjectPrivsMDTable objectPrivsTable (objectTableName_, pDiags_); char buf[1000]; if (foundRow) { ObjectUsage objectUsage; objectUsage.objectUID = objectUID_; objectUsage.objectOwner = granteeID; objectUsage.objectName = row.objectName_; objectUsage.objectType = row.objectType_; PrivMgrDesc originalPrivs (row.granteeID_); originalPrivs.setTablePrivs(savedOriginalPrivs); objectUsage.originalPrivs = originalPrivs; objectUsage.updatedPrivs = privsToGrant; // get list of all objects that need to change, the table object and // views std::vector<ObjectUsage *> listOfObjects; PrivCommand command = PrivCommand::GRANT_OBJECT; retcode = getAffectedObjects(objectUsage, command, listOfObjects); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } traceMsg = "updating all affected objects, number of objects is "; traceMsg += to_string((long long int)listOfObjects.size()); log (__FILE__, traceMsg, -1); // update the OBJECT_PRIVILEGES row for each effected object for (size_t i = 0; i < listOfObjects.size(); i++) { ObjectUsage *pObj = listOfObjects[i]; pObj->describe(traceMsg); log (__FILE__, traceMsg, i); int32_t theGrantor = (pObj->objectType == COM_VIEW_OBJECT) ? 
SYSTEM_AUTH_ID : grantorID_; int32_t theGrantee = pObj->objectOwner; int64_t theUID = pObj->objectUID; PrivMgrCoreDesc thePrivs = pObj->updatedPrivs.getTablePrivs(); sprintf(buf, "where grantee_id = %d and grantor_id = %d and object_uid = %ld", theGrantee, theGrantor, theUID); std::string whereClause (buf); sprintf(buf, "set privileges_bitmap = %ld, grantable_bitmap = %ld", thePrivs.getPrivBitmap().to_ulong(), thePrivs.getWgoBitmap().to_ulong()); std::string setClause (buf); // update the row retcode = objectPrivsTable.updateWhere(setClause, whereClause); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } } deleteListOfAffectedObjects(listOfObjects); } else { row.describeRow(traceMsg); traceMsg.insert(0, "adding new privilege row "); log (__FILE__, traceMsg, -1); // insert the row retcode = objectPrivsTable.insert(row); } log (__FILE__, "****** GRANT operation succeeded ******", -1); return retcode; } // ***************************************************************************** // * Method: grantObjectPriv // * // * Adds or update a row in the OBJECT_PRIVILEGES table representing the // * owner privileges. The privileges and grantable bitmaps as passed in. // * // * Parameters: // * // * <objectType> is the type of the subject object. // * <granteeID> is the unique identifier for the grantee // * <granteeName> is the name of the grantee (upper cased) // * <privBitmap> is the list of privileges to grant // * <grantableBitmap> is the grantable privileges to grant // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were granted // * *: Unable to grant privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::grantObjectPriv( const ComObjectType objectType, const int32_t granteeID, const std::string &granteeName, const PrivObjectBitmap privsBitmap, const PrivObjectBitmap grantableBitmap) { PrivStatus retcode = STATUS_GOOD; if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for grant command"); return STATUS_ERROR; } // set up the values of the row to insert ObjectPrivsMDRow row; row.objectUID_ = objectUID_; row.objectName_ = objectName_; row.objectType_ = objectType; row.granteeID_ = granteeID; row.granteeName_ = granteeName; row.granteeType_ = USER_GRANTEE_LIT; row.grantorID_ = SYSTEM_AUTH_ID; row.grantorName_ = "_SYSTEM"; row.grantorType_ = SYSTEM_GRANTOR_LIT; row.privsBitmap_ = privsBitmap; row.grantableBitmap_ = grantableBitmap; // Insert the new row, the row should not exist since the request // is coming during the creation of a new object. ObjectPrivsMDTable objectPrivsTable (objectTableName_, pDiags_); retcode = objectPrivsTable.insert(row); return retcode; } // ***************************************************************************** // * Method: grantToOwners // * // * Performs the initial grant from the system to the owner. For private // * schemas, where the creator is not the schema/object owner, a grant from // * the object owner to the creator is also performed. // * // * Parameters: // * // * <objectType> is the type of the subject object. 
// * <granteeID> is the unique identifier for the grantee // * <granteeName> is the name of the grantee (upper cased) // * <ownerID> is the unique identifier for the owner of the object // * <ownerName> is the name of the owner (upper cased) // * <creatorID> is the unique identifier for the creator of the object // * <creatorName> is the name of the creator (upper cased) // * // * Returns: PrivStatus // * // * STATUS_GOOD: All DML privs were granted // * *: Not all privs were granted. Error in CLI diags area. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::grantToOwners( const ComObjectType objectType, const Int32 granteeID, const std::string & granteeName, const Int32 ownerID, const std::string & ownerName, const Int32 creatorID, const std::string & creatorName) { ObjectPrivsMDRow row; PrivMgrCoreDesc corePrivs; PrivObjectBitmap privsBitmap; PrivObjectBitmap grantableBitmap; corePrivs.setAllObjectGrantPrivilege(objectType,true); privsBitmap = corePrivs.getPrivBitmap(); grantableBitmap = corePrivs.getWgoBitmap(); // Add the root grant from the system. row.objectUID_ = objectUID_; row.objectName_ = objectName_; row.objectType_ = objectType; row.granteeID_ = ownerID; row.granteeName_ = ownerName; row.granteeType_ = USER_GRANTEE_LIT; row.grantorID_ = SYSTEM_AUTH_ID; row.grantorName_ = SYSTEM_AUTH_NAME; row.grantorType_ = COM_SYSTEM_GRANTOR_LIT; row.privsBitmap_ = privsBitmap; row.grantableBitmap_ = grantableBitmap; ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); PrivStatus privStatus = objectPrivsTable.insert(row); if (privStatus != STATUS_GOOD) return privStatus; // If the owner and creator are the same, we are done. // If not, this is an object being created in a private schema, and // we need to grant privileges to the creator. If the creator is DB__ROOT, // no need to grant privileges. // // This creator grant may be controlled by a CQD in the future. if (ownerID == creatorID || creatorID == ComUser::getRootUserID()) return STATUS_GOOD; // Add a grant from the private schema owner to the creator. row.grantorID_ = row.granteeID_; row.grantorName_ = row.granteeName_; row.grantorType_ = USER_GRANTOR_LIT; row.granteeID_ = creatorID; row.granteeName_ = creatorName; return objectPrivsTable.insert(row); } // ***************************************************************************** // * Method: insertPrivRowsForObject // * // * writes rows that add grants of privileges for an object. // * // * Parameters: // * // * <objectPrivsRows> One or more rows of grants for the object. // * // * Returns: PrivStatus // * // * STATUS_GOOD : Rows were returned // * STATUS_NOTFOUND: No rows were returned // * *: Unable to read privileges, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::insertPrivRowsForObject( const int64_t objectUID, const std::vector<ObjectPrivsRow> & objectPrivsRows) { PrivStatus retcode = STATUS_GOOD; if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for insertPrivRowsForObject()"); return STATUS_ERROR; } ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); for (int32_t i = 0; i < objectPrivsRows.size();++i) { ObjectPrivsMDRow row; const ObjectPrivsRow &rowIn = objectPrivsRows[i]; char granteeTypeString[3] = {0}; char grantorTypeString[3] = {0}; row.objectUID_ = objectUID_; row.objectName_ = rowIn.objectName; row.objectType_ = rowIn.objectType; row.granteeID_ = rowIn.granteeID; row.granteeName_ = rowIn.granteeName; CmGetComGranteeAsLit(rowIn.granteeType,granteeTypeString); row.granteeType_ = granteeTypeString; row.grantorID_ = rowIn.grantorID; row.grantorName_ = rowIn.grantorName; CmGetComGrantorAsLit(rowIn.grantorType,grantorTypeString); row.grantorType_ = grantorTypeString; row.privsBitmap_ = rowIn.privilegesBitmap; row.grantableBitmap_ = rowIn.grantableBitmap; retcode = objectPrivsTable.insert(row); if (retcode != STATUS_GOOD) return retcode; } return retcode; } // **************************************************************************** // method: dealWithConstraints // // This method finds all the constraints associated with the table and // determines if any are adversely affected by the privilege change. // // Params: // objectUsage - the affected object // listOfAffectedObjects - returns the list of affected objects // // Returns: PrivStatus // STATUS_GOOD: No problems were encountered // *: Errors were encountered, ComDiags area is set up // // **************************************************************************** PrivStatus PrivMgrPrivileges::dealWithConstraints( const ObjectUsage &objectUsage, std::vector<ObjectUsage *> &listOfAffectedObjects) { PrivStatus retcode = STATUS_GOOD; std::string traceMsg; objectUsage.describe(traceMsg); traceMsg.insert (0, "checking referencing constraints for "); log (__FILE__, traceMsg, -1); // RI constraints can only be defined for base tables if (objectUsage.objectType != COM_BASE_TABLE_OBJECT) return STATUS_GOOD; // get the underlying tables for all RI constraints that reference the object std::vector<ObjectReference *> objectList; PrivMgrMDAdmin admin(trafMetadataLocation_, metadataLocation_, pDiags_); retcode = admin.getReferencingTablesForConstraints(objectUsage, objectList); traceMsg = "getting constraint usages: number usages found "; traceMsg += to_string((long long int)objectList.size()); traceMsg += ", retcode is "; traceMsg += privStatusEnumToLit(retcode); log (__FILE__, traceMsg, -1); if (retcode == STATUS_ERROR) return retcode; // objectList contain ObjectReferences for all tables that reference the // ObjectUsage (object losing privilege) through an RI constraint PrivMgrDesc originalPrivs; PrivMgrDesc currentPrivs; int32_t lastObjectOwnerID = 0; std::vector<int32_t> roleIDs; for (size_t i = 0; i < objectList.size(); i++) { ObjectReference *pObj = objectList[i]; pObj->describe(traceMsg); log (__FILE__, traceMsg, i); if (lastObjectOwnerID != pObj->objectOwner) { roleIDs.clear(); retcode = getRoleIDsForUserID(pObj->objectOwner,roleIDs); if (retcode == STATUS_ERROR) return retcode; } // get the summarized original and current privs for the referencing table // current privs contains any adjustments retcode = 
summarizeCurrentAndOriginalPrivs(pObj->objectUID, pObj->objectOwner, roleIDs, listOfAffectedObjects, originalPrivs, currentPrivs); if (retcode != STATUS_GOOD) return retcode; // If the underlying table no long has REFERENCES privileges return // a dependency error. PrivMgrCoreDesc thePrivs = objectUsage.updatedPrivs.getTablePrivs(); if (!thePrivs.getPriv(REFERENCES_PRIV)) { std::string referencingTable; if (admin.getConstraintName(objectUsage.objectUID, pObj->objectUID, referencingTable) == false) { referencingTable = "UNKNOWN, Referencing table ID is "; referencingTable += UIDToString(pObj->objectUID); } *pDiags_ << DgSqlCode (-CAT_DEPENDENT_OBJECTS_EXIST) << DgString0 (referencingTable.c_str()); return STATUS_ERROR; } } return STATUS_GOOD; } // **************************************************************************** // method: dealWithUdrs // // This method finds all the udrs associated with the library and // determines if any are adversely affected by the privilege change. // // Params: // objectUsage - the affected object // listOfAffectedObjects - returns the list of affected objects // // Returns: PrivStatus // STATUS_GOOD: No problems were encountered // *: Errors were encountered, ComDiags area is set up // // **************************************************************************** PrivStatus PrivMgrPrivileges::dealWithUdrs( const ObjectUsage &objectUsage, std::vector<ObjectUsage *> &listOfAffectedObjects) { PrivStatus retcode = STATUS_GOOD; // udrs (functions and procedures) can only be defined for in libraries if (objectUsage.objectType != COM_LIBRARY_OBJECT) return STATUS_GOOD; std::string traceMsg; objectUsage.describe(traceMsg); traceMsg.insert (0, "checking referencing routines for "); log (__FILE__, traceMsg, -1); // get the udrs that reference the library for the grantee std::vector<ObjectReference *> objectList; PrivMgrMDAdmin admin(trafMetadataLocation_, metadataLocation_, pDiags_); retcode = admin.getUdrsThatReferenceLibrary(objectUsage, objectList); traceMsg = "getting routine usages: number usages found "; traceMsg += to_string((long long int)objectList.size()); traceMsg += ", retcode is "; traceMsg += privStatusEnumToLit(retcode); log (__FILE__, traceMsg, -1); if (retcode == STATUS_ERROR) return retcode; // objectList contain ObjectReferences for all udrs that reference the // ObjectUsage (object losing privilege) through a library PrivMgrDesc originalPrivs; PrivMgrDesc currentPrivs; if (objectList.size() > 0) { std::vector<int32_t> roleIDs; retcode = getRoleIDsForUserID(objectUsage.objectOwner,roleIDs); if (retcode == STATUS_ERROR) return retcode; // if the grantee owns any udrs, get the summarized original and current // privs for the library // current privs contains any adjustments retcode = summarizeCurrentAndOriginalPrivs(objectUsage.objectUID, objectUsage.objectOwner, roleIDs, listOfAffectedObjects, originalPrivs, currentPrivs); if (retcode != STATUS_GOOD) return retcode; // If the udr can no longer be created due to lack of USAGE privilege, // return a dependency error. PrivMgrCoreDesc thePrivs = objectUsage.updatedPrivs.getTablePrivs(); if (!thePrivs.getPriv(USAGE_PRIV)) { // There could be multiple udrs, just pick the first one in the list // for the error message. 
ObjectReference *pObj = objectList[0]; *pDiags_ << DgSqlCode (-CAT_DEPENDENT_OBJECTS_EXIST) << DgString0 (pObj->objectName.c_str()); return STATUS_ERROR; } } return STATUS_GOOD; } // **************************************************************************** // method: dealWithViews // // This method finds all the views that referenced the object. // This method recursively calls itself to find other views referenced in the // tree of referencing views // // Params: // objectUsage - the affected object // command - type of command - grant, revoke restrict, revoke cascade // listOfAffectedObjects - returns the list of affected objects // // Returns: PrivStatus // STATUS_GOOD: No problems were encountered // *: Errors were encountered, ComDiags area is set up // // In the future, we want to cache the lists of objects instead of going to the // metadata everytime. // **************************************************************************** PrivStatus PrivMgrPrivileges::dealWithViews( const ObjectUsage &objectUsage, const PrivCommand command, std::vector<ObjectUsage *> &listOfAffectedObjects) { PrivStatus retcode = STATUS_GOOD; std::string traceMsg; objectUsage.describe(traceMsg); traceMsg.insert (0, "checking referencing views for "); log (__FILE__, traceMsg, -1); // Get any views that referenced this object to see if the privilege changes // should be propagated std::vector<ViewUsage> viewUsages; PrivMgrMDAdmin admin(trafMetadataLocation_, metadataLocation_, pDiags_); retcode = admin.getViewsThatReferenceObject(objectUsage, viewUsages); traceMsg = "getting view usages: number usages found "; traceMsg += to_string((long long int)viewUsages.size()); traceMsg += ", retcode is "; traceMsg += privStatusEnumToLit(retcode); log (__FILE__, traceMsg, -1); if (retcode == STATUS_NOTFOUND) return STATUS_GOOD; if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) return retcode; // for each entry in the viewUsages list calculate the changed // privileges and call dealWithViews recursively for (size_t i = 0; i < viewUsages.size(); i++) { ViewUsage viewUsage = viewUsages[i]; viewUsage.describe(traceMsg); log (__FILE__, traceMsg, i); // this method recreates privileges for the view based on the original // and the current. Updated descriptors are stored in the viewUsage // structure. 
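    // Illustrative example (object names are hypothetical): if view V is
    // defined on tables T1 and T2, and after the privilege change its owner
    // would retain SELECT WITH GRANT OPTION on T1 but only SELECT on T2, the
    // summarized privileges for V become SELECT without grant option, because
    // gatherViewPrivileges intersects the privileges obtained from each
    // referenced object.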
retcode = gatherViewPrivileges(viewUsage, listOfAffectedObjects); traceMsg = "gathered view privs: retcode is "; traceMsg += privStatusEnumToLit(retcode); log (__FILE__, traceMsg, -1); if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) { return retcode; } // check to see if privileges changed if (viewUsage.originalPrivs == viewUsage.updatedPrivs) {} else { // this view is affected by the grant/revoke request, add to list // and check to see if anything down stream needs to change ObjectUsage *pUsage = new (ObjectUsage); pUsage->objectUID = viewUsage.viewUID; pUsage->objectOwner = viewUsage.viewOwner; pUsage->objectName = viewUsage.viewName; pUsage->objectType = COM_VIEW_OBJECT; pUsage->originalPrivs = viewUsage.originalPrivs; pUsage->updatedPrivs = viewUsage.updatedPrivs; listOfAffectedObjects.push_back(pUsage); traceMsg = "adding new objectUsage for "; pUsage->describe(traceMsg); log (__FILE__, traceMsg, i); retcode = dealWithViews(*pUsage, command, listOfAffectedObjects); if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) return retcode; } } return STATUS_GOOD; } // ---------------------------------------------------------------------------- // method: gatherViewPrivileges // // This method gathers privileges for the view both the original and current // // parameters: // viewUsage - description of the view // listOfAffectedObjects - list of changed privileges so far // // Returns: PrivStatus // // STATUS_GOOD: Privileges were gathered // *: Unable to gather privileges, see diags. // // ---------------------------------------------------------------------------- PrivStatus PrivMgrPrivileges::gatherViewPrivileges( ViewUsage &viewUsage, const std::vector<ObjectUsage *> listOfAffectedObjects) { PrivStatus retcode = STATUS_GOOD; std::string traceMsg; // initialize summarized descriptors and set all applicable privileges // TBD: if view is not updatable, should initialize correctly. 
// views have same privileges as tables bool setWGOtrue = true; PrivMgrDesc summarizedOriginalPrivs; summarizedOriginalPrivs.setAllTableGrantPrivileges(setWGOtrue); PrivMgrDesc summarizedCurrentPrivs; summarizedCurrentPrivs.setAllTableGrantPrivileges(setWGOtrue); // Get list of objects referenced by the view std::vector<ObjectReference *> objectList; PrivMgrMDAdmin admin(trafMetadataLocation_, metadataLocation_, pDiags_); retcode = admin.getObjectsThatViewReferences(viewUsage, objectList); traceMsg += "getting object references: number references found "; traceMsg += to_string((long long int)objectList.size()); traceMsg += ", retcode is "; traceMsg += privStatusEnumToLit(retcode); log (__FILE__, traceMsg, -1); if (retcode == STATUS_ERROR) return retcode; // For each referenced object, summarize the original and current // privileges PrivMgrDesc originalPrivs; PrivMgrDesc currentPrivs; int32_t lastObjectOwnerID = 0; std::vector<int32_t> roleIDs; for (size_t i = 0; i < objectList.size(); i++) { ObjectReference *pObj = objectList[i]; pObj->describe(traceMsg); log (__FILE__, traceMsg, i); if (lastObjectOwnerID != pObj->objectOwner) { roleIDs.clear(); retcode = getRoleIDsForUserID(pObj->objectOwner,roleIDs); if (retcode == STATUS_ERROR) return retcode; } // get the summarized original and current privs for the // referenced object that have been granted to the view owner // listOfAffectedObjects contain the privilege adjustments needed // to generate the current privs retcode = summarizeCurrentAndOriginalPrivs(pObj->objectUID, viewUsage.viewOwner, roleIDs, listOfAffectedObjects, originalPrivs, currentPrivs); if (retcode != STATUS_GOOD) return retcode; // If user no longer has select privilege on referenced object // returns an error // When cascade is supported, then referenced objects will be removed //if (command == PrivCommand::REVOKE_OBJECT_RESTRICT ) //{ PrivMgrCoreDesc thePrivs = currentPrivs.getTablePrivs(); if (!thePrivs.getPriv(SELECT_PRIV)) { *pDiags_ << DgSqlCode (-CAT_DEPENDENT_OBJECTS_EXIST) << DgString0 (viewUsage.viewName.c_str()); return STATUS_ERROR; } //} // add the returned privilege to the summarized privileges // for all objects summarizedOriginalPrivs.intersectionOfPrivs(originalPrivs); summarizedCurrentPrivs.intersectionOfPrivs(currentPrivs); originalPrivs.resetTablePrivs(); currentPrivs.resetTablePrivs(); } // Update view usage with summarized privileges viewUsage.originalPrivs = summarizedOriginalPrivs; viewUsage.updatedPrivs = summarizedCurrentPrivs; return STATUS_GOOD; } // **************************************************************************** // method: generateColumnRowList // // generate the list of privileges granted to columns of the object and // store in columnRowList_ class member // // The list is ordered by grantor/grantee/column_number // // Returns: // STATUS_GOOD - list of rows was generated // STATUS_NOTFOUND - no column privileges were found // STATUS_ERROR - the diags area is populated with any errors // **************************************************************************** PrivStatus PrivMgrPrivileges::generateColumnRowList() { std::string whereClause ("where object_uid = "); whereClause += UIDToString(objectUID_); std::string orderByClause (" order by grantor_id, grantee_id, column_number "); ColumnPrivsMDTable columnPrivsTable(columnTableName_,pDiags_); PrivStatus privStatus = columnPrivsTable.selectWhere(whereClause, orderByClause, columnRowList_); std::string traceMsg ("getting column privileges, number privileges is "); traceMsg += 
to_string((long long int)columnRowList_.size()); log (__FILE__, traceMsg, -1); for (size_t i = 0; i < columnRowList_.size(); i++) { ColumnPrivsMDRow privRow = static_cast<ColumnPrivsMDRow &> (*columnRowList_[i]); privRow.describeRow(traceMsg); log (__FILE__, traceMsg, i); } return privStatus; } // **************************************************************************** // method: generateObjectRowList // // generate the list of privileges granted to the object and // store in objectRowList_ class member // // The list is ordered by grantor/grantee // // Returns: // STATUS_GOOD - list of rows was generated // STATUS_NOTFOUND - in most cases, there should be at least one row in the // OBJECT_PRIVILEGES table. But there is at least one case // where this is not true - trying to get privilege info // for indexes when using the table (index_table) syntax. // STATUS_ERROR - the diags area is populated with any errors // **************************************************************************** PrivStatus PrivMgrPrivileges::generateObjectRowList() { std::string whereClause ("where object_uid = "); whereClause += UIDToString(objectUID_); std::string orderByClause(" order by grantor_id, grantee_id "); ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); PrivStatus privStatus = objectPrivsTable.selectWhere(whereClause, orderByClause, objectRowList_); std::string traceMsg ("getting object privileges, number privleges is "); traceMsg += to_string((long long int)objectRowList_.size()); log (__FILE__, traceMsg, -1); for (size_t i = 0; i < objectRowList_.size(); i++) { ObjectPrivsMDRow privRow = static_cast<ObjectPrivsMDRow &> (*objectRowList_[i]); privRow.describeRow(traceMsg); log (__FILE__, traceMsg, i); } return privStatus; } // **************************************************************************** // method: getAffectedObjects // // This method adds the current object to the listOfAffectedObjects and then // looks for dependent objects such as constraints and views that will be // affected by the privilege change. 
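// For example (illustrative): on a revoke, the object itself is added to the
// list first, then dealWithConstraints and dealWithUdrs verify that no RI
// constraint or routine depends on a privilege being lost, and dealWithViews
// adds any views whose summarized privileges change as a result.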
// // Params: // objectUsage - the affected object // command - GRANT or REVOKE RESTRICT or REVOKE CASCADE // listOfAffectedObjects - returns the list of affected objects // **************************************************************************** PrivStatus PrivMgrPrivileges::getAffectedObjects( const ObjectUsage &objectUsage, const PrivCommand command, std::vector<ObjectUsage *> &listOfAffectedObjects) { PrivStatus retcode = STATUS_GOOD; std::string traceMsg; // found an object whose privileges need to be updated ObjectUsage *pUsage = new (ObjectUsage); pUsage->objectUID = objectUsage.objectUID; pUsage->objectOwner = objectUsage.objectOwner; pUsage->objectName = objectUsage.objectName; pUsage->objectType = objectUsage.objectType; pUsage->originalPrivs = objectUsage.originalPrivs; pUsage->updatedPrivs = objectUsage.updatedPrivs; listOfAffectedObjects.push_back(pUsage); // Find list of affected constraints if (command != PrivCommand::GRANT_OBJECT) { retcode = dealWithConstraints (objectUsage, listOfAffectedObjects); if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) return retcode; retcode = dealWithUdrs (objectUsage, listOfAffectedObjects); if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) return retcode; } // Find list of affected views retcode = dealWithViews (objectUsage, command, listOfAffectedObjects); if (retcode != STATUS_GOOD && retcode != STATUS_WARNING) return retcode; return STATUS_GOOD; } // ---------------------------------------------------------------------------- // method: getGrantedPrivs // // This method reads the metadata to get privilege information for the // object, grantor, and grantee. // // input: granteeID // output: a row from the object_privileges table describing privilege details // // Returns: PrivStatus // // STATUS_GOOD: row was found (and returned) // STATUS_NOTFOUND: no privileges have been granted // ---------------------------------------------------------------------------- PrivStatus PrivMgrPrivileges::getGrantedPrivs( const int32_t granteeID, PrivMgrMDRow &rowOut) { ObjectPrivsMDRow & row = static_cast<ObjectPrivsMDRow &>(rowOut); for (size_t i = 0; i < objectRowList_.size(); i++) { ObjectPrivsMDRow privRow = static_cast<ObjectPrivsMDRow &> (*objectRowList_[i]); if (privRow.grantorID_ == grantorID_ && privRow.granteeID_ == granteeID) { row = privRow; return STATUS_GOOD; } } return STATUS_NOTFOUND; } // ---------------------------------------------------------------------------- // method: getGrantorDetailsForObject // // returns the effective grantor ID and grantor name for grant and revoke // object statements // // Input: // isGrantedBySpecified - true if grant request included a GRANTED BY clause // grantedByName - name specified in GRANTED BY clause // objectOwner - owner of object that is the subject for the grant or revoke // // Output: // effectiveGrantorID - the ID to use for grant and revoke // effectiveGrantorName - the name to use for grant and revoke // // returns PrivStatus with the results of the operation. The diags area // contains error details. 
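//
// Illustrative example (names are hypothetical): if USER1 issues
//   GRANT SELECT ON T1 TO USER2 GRANTED BY ROLE1;
// the clause is honored only when USER1 is DB__ROOT, owns T1, holds the
// MANAGE_PRIVILEGES component privilege, or has been granted ROLE1; the
// effective grantor then becomes ROLE1 rather than USER1.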
// ---------------------------------------------------------------------------- PrivStatus PrivMgrPrivileges::getGrantorDetailsForObject( const bool isGrantedBySpecified, const std::string grantedByName, const int_32 objectOwner, int_32 &effectiveGrantorID, std::string &effectiveGrantorName) { int_32 currentUser = ComUser::getCurrentUser(); short retcode = 0; if (!isGrantedBySpecified) { // If the user is DB__ROOT, a grant or revoke operation is implicitly on // behalf of the object owner. Likewise, if a user has been granted the // MANAGE_PRIVILEGES component-level privilege they can grant on // behalf of the owner implicitly. Otherwise, the grantor is the user. if (!ComUser::isRootUserID()) { PrivMgrComponentPrivileges componentPrivileges(metadataLocation_,pDiags_); if (!componentPrivileges.hasSQLPriv(currentUser,SQLOperation::MANAGE_PRIVILEGES, true)) { effectiveGrantorName = ComUser::getCurrentUsername(); effectiveGrantorID = currentUser; return STATUS_GOOD; } } // User is DB__ROOT. Get the effective grantor name. char authName[MAX_USERNAME_LEN+1]; Int32 actualLen = 0; retcode = ComUser::getAuthNameFromAuthID(objectOwner,authName, MAX_USERNAME_LEN,actualLen); if (retcode != FEOK) { *pDiags_ << DgSqlCode(-20235) << DgInt0(retcode) << DgInt1(objectOwner); return STATUS_ERROR; } effectiveGrantorID = objectOwner; effectiveGrantorName = authName; return STATUS_GOOD; } // GRANTED BY was specified, first see if authorization name is valid. Then // determine if user has authority to use the clause. // Get the grantor ID from the grantorName retcode = ComUser::getAuthIDFromAuthName(grantedByName.c_str(),effectiveGrantorID); if (retcode == FENOTFOUND) { *pDiags_ << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR) << DgString0(grantedByName.c_str()); return STATUS_ERROR; } if (retcode != FEOK) { *pDiags_ << DgSqlCode(-20235) << DgInt0(retcode) << DgInt1(objectOwner); return STATUS_ERROR; } effectiveGrantorName = grantedByName; // Name exists, does user have authority? // // GRANTED BY is allowed if any of the following are true: // // 1) The user is DB__ROOT. // 2) The user is owner of the object. // 3) The user has been granted the MANAGE_PRIVILEGES component-level privilege. // 4) The grantor is a role and the user has been granted the role. if (ComUser::isRootUserID() || currentUser == objectOwner) return STATUS_GOOD; PrivMgrComponentPrivileges componentPrivileges(metadataLocation_,pDiags_); if (componentPrivileges.hasSQLPriv(currentUser,SQLOperation::MANAGE_PRIVILEGES, true)) return STATUS_GOOD; // If the grantor is not a role, user does not have authority. if (!isRoleID(effectiveGrantorID)) { *pDiags_ << DgSqlCode(-CAT_NOT_AUTHORIZED); return STATUS_ERROR; } // Role specified in BY clause must be granted to the current user for user // to have authority. PrivMgrRoles roles(trafMetadataLocation_,metadataLocation_,pDiags_); if (roles.hasRole(currentUser,effectiveGrantorID)) return STATUS_GOOD; *pDiags_ << DgSqlCode(-CAT_NOT_AUTHORIZED); return STATUS_ERROR; } // ***************************************************************************** // * Method: revokeColumnPrivileges // * // * Adds or updates a row in the COLUMN_PRIVILEGES table. // * // * Parameters: // * // * <objectType> is the type of the subject object. 
// * <granteeID> is the unique identifier for the grantee // * <granteeName> is the name of the grantee (upper cased) // * <grantorName> is the name of the grantor (upper cased) // * <colPrivsArray> is the list of columns and privileges to grant // * <isWGOSpecified> is true then also allow the grantee to grant the set // * of privileges to other grantees // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were revoked // * *: Unable to revoke privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::revokeColumnPrivileges( const ComObjectType objectType, const int32_t granteeID, const std::string & granteeName, const std::string & grantorName, const std::vector<ColPrivSpec> & colPrivsArrayIn, const bool isWGOSpecified) { PrivStatus privStatus = STATUS_GOOD; log (__FILE__, "checking column privileges", -1); std::vector<ColPrivSpec> &colPrivsArray = const_cast<std::vector<ColPrivSpec> &>(colPrivsArrayIn); ColumnPrivsMDTable columnPrivsTable(columnTableName_,pDiags_); std::string privilege; std::vector<ColPrivEntry> grantedColPrivs; // get the list of column privileges for the object if (generateColumnRowList() == STATUS_ERROR) return STATUS_ERROR; // First verify the grantor has granted all the privileges they wish to revoke. // If not, report the first privilege that cannot be revoked. if (!hasGrantedColumnPriv(columnRowList_,grantorID_,granteeID, colPrivsArrayIn,privStatus,privilege,grantedColPrivs)) { if (privStatus == STATUS_NOTFOUND) { std::string privOnObject(privilege + " on "); privOnObject += objectName_; *pDiags_ << DgSqlCode(-CAT_GRANT_NOT_FOUND) << DgString0(privOnObject.c_str()) << DgString1(grantorName.c_str()) << DgString2(granteeName.c_str()); return STATUS_ERROR; } return privStatus; } // Create a privsToRevoke array using the passed in revoke entries and the // list of currently granted column privileges. Combine multiple privileges // for the same column into one entry. std::vector<ColPrivEntry> colPrivsToRevoke; for (size_t i = 0; i < colPrivsArray.size(); i++) { const ColPrivSpec &colPrivSpecEntry = colPrivsArray[i]; ColPrivEntry *existingEntry = findColumnEntry(colPrivsToRevoke, colPrivSpecEntry.columnOrdinal); if (existingEntry != NULL) existingEntry->privsBitmap.set(colPrivSpecEntry.privType); else { ColPrivEntry colPrivToRevoke; colPrivToRevoke.columnOrdinal = colPrivSpecEntry.columnOrdinal; colPrivToRevoke.privsBitmap.set(colPrivSpecEntry.privType); colPrivsToRevoke.push_back(colPrivToRevoke); } } // At this point we have an array of privsToRevoke with column ordinal and // priv bitmap. // // Three revoke column cases: // // Spec Spec Priv bitmap compare // GOF Priv to granted priv bitmap Action // T 1 NA Removing WGO only. Update operation. // Reset privType bit in grantable bitmap, // copy priv bitmap from granted privs. // // F 1 Equal Revoking all privs on this column, plus // WGO. Delete operation. // // F 1 Not equal Revoking some privs on this column plus // WGO for the revoked privs. Reset bits in // both bitmaps. Update operation. //TODO: When revoking WGO, need to check for dependent objects, e.g. views. 
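//
// Illustrative sketch of the bitmap handling below (bit positions are
// hypothetical): if SELECT was granted on a column WITH GRANT OPTION
// (privsBitmap = 01, grantableBitmap = 01) and the request is
// REVOKE GRANT OPTION FOR SELECT, the row is updated so that privsBitmap
// stays 01 and grantableBitmap becomes 00; a plain REVOKE SELECT on the
// same column would instead delete the row.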
bool rowRevoked = false; PrivColumnBitmap revokedPrivs; std::string whereBase(" WHERE object_uid = "); whereBase += UIDToString(objectUID_); whereBase += " AND grantor_id = "; whereBase += authIDToString(grantorID_); whereBase += " AND grantee_id = "; whereBase += authIDToString(granteeID); whereBase += " AND column_number = "; for (size_t i = 0; i < colPrivsToRevoke.size(); i++) { ColPrivEntry &colPrivToRevoke = colPrivsToRevoke[i]; bool updateRow = false; bool deleteRow = false; // Look for any existing granted privileges on the column for which // privileges are to be granted. for (size_t g = 0; g < grantedColPrivs.size(); g++) { const ColPrivEntry &grantedColPriv = grantedColPrivs[g]; // See if there is an existing column privilege granted for this column. // If not, check the next granted column privilege. If none are found // for this column, it is an internal error. if (colPrivToRevoke.columnOrdinal != grantedColPriv.columnOrdinal) continue; // Found row with grant for this column. // Verify privilge(s) being revoked was/were granted. If not, internal error. if ((colPrivToRevoke.privsBitmap & grantedColPriv.privsBitmap) == 0) { PRIVMGR_INTERNAL_ERROR("Privilege to revoke not found"); return STATUS_ERROR; } if (isWGOSpecified) { // We want to clear the corresponding bits in the grantable bitmap. // Flip the bits of the privs to revoke bitmap, then and the // negation with the current grantable bitmap. Not revoking any // privileges, so update with current priv bitmap. PrivColumnBitmap revokeBitmap = ~colPrivToRevoke.privsBitmap; colPrivToRevoke.privsBitmap = grantedColPriv.privsBitmap; colPrivToRevoke.grantableBitmap = grantedColPriv.grantableBitmap & revokeBitmap; updateRow = true; } else { if (colPrivToRevoke.privsBitmap == grantedColPriv.privsBitmap) deleteRow = true; else { PrivColumnBitmap revokeBitmap = ~colPrivToRevoke.privsBitmap; colPrivToRevoke.privsBitmap = grantedColPriv.privsBitmap & revokeBitmap; colPrivToRevoke.grantableBitmap = grantedColPriv.grantableBitmap & revokeBitmap; updateRow = true; } revokedPrivs |= colPrivToRevoke.privsBitmap; } break; } if (deleteRow) { std::string whereClause(whereBase + authIDToString(colPrivToRevoke.columnOrdinal)); privStatus = columnPrivsTable.deleteWhere(whereClause); if (privStatus == STATUS_ERROR) return privStatus; rowRevoked = true; continue; } if (!updateRow) { PRIVMGR_INTERNAL_ERROR("Column privilege not found to revoke"); return STATUS_ERROR; } ColumnPrivsMDRow row; row.objectUID_ = objectUID_; row.objectName_ = objectName_; row.granteeID_ = granteeID; row.granteeName_ = granteeName; row.grantorID_ = grantorID_; row.grantorName_ = grantorName; row.privsBitmap_ = colPrivToRevoke.privsBitmap; row.grantableBitmap_ = colPrivToRevoke.grantableBitmap; row.columnOrdinal_ = colPrivToRevoke.columnOrdinal; privStatus = columnPrivsTable.updateColumnRow(row,whereBase); if (privStatus == STATUS_ERROR) return privStatus; rowRevoked = true; } // Send revoked privs to RMS SQL_QIKEY siKeyList[NBR_DML_COL_PRIVS]; size_t siIndex = 0; for (size_t i = FIRST_DML_COL_PRIV; i <= LAST_DML_COL_PRIV; i++ ) { if (!revokedPrivs.test(PrivType(i))) continue; ComSecurityKey secKey(granteeID,objectUID_,PrivType(i), ComSecurityKey::OBJECT_IS_OBJECT); siKeyList[siIndex].revokeKey.subject = secKey.getSubjectHashValue(); siKeyList[siIndex].revokeKey.object = secKey.getObjectHashValue(); std::string actionString; secKey.getSecurityKeyTypeAsLit(actionString); strncpy(siKeyList[siIndex].operation,actionString.c_str(),2); siIndex++; } if (siIndex > 0) 
      SQL_EXEC_SetSecInvalidKeys(siIndex,siKeyList);

// if (!rowRevoked)
//   Warning
//      ;

   return STATUS_GOOD;
}
//************* End of PrivMgrPrivileges::revokeColumnPrivileges ***************

// *****************************************************************************
// * Method: revokeObjectPriv
// *
// *    Deletes or updates a row in the OBJECT_PRIVILEGES table.
// *
// *  Parameters:
// *
// *  <objectType> is the type of the subject object.
// *  <granteeID> is the unique identifier for the grantee
// *  <privsList> is the list of privileges to revoke
// *  <isAllSpecified> if true then all privileges valid for the object
// *                   type will be revoked
// *  <isGOFSpecified> if true then remove the ability for the grantee
// *                   to grant the set of privileges to other grantees
// *
// * Returns: PrivStatus
// *
// * STATUS_GOOD: Privileges were revoked
// *           *: Unable to revoke privileges, see diags.
// *
// *****************************************************************************
PrivStatus PrivMgrPrivileges::revokeObjectPriv (const ComObjectType objectType,
                                                const int32_t granteeID,
                                                const std::string & granteeName,
                                                const std::string & grantorName,
                                                const std::vector<PrivType> &privsList,
                                                const std::vector<ColPrivSpec> & colPrivsArray,
                                                const bool isAllSpecified,
                                                const bool isGOFSpecified)
{
  PrivStatus retcode = STATUS_GOOD;
  std::string traceMsg;
  log (__FILE__, "****** REVOKE operation begins ******", -1);

  if (objectUID_ == 0)
  {
    PRIVMGR_INTERNAL_ERROR("objectUID is 0 for revoke command");
    return STATUS_ERROR;
  }

  // get roleIDs for grantor
  std::vector<int_32> roleIDs;
  retcode = getRoleIDsForUserID(grantorID_,roleIDs);
  if (retcode == STATUS_ERROR)
    return retcode;

  if (!colPrivsArray.empty())
  {
    retcode = revokeColumnPrivileges(objectType,granteeID,granteeName,
                                     grantorName,colPrivsArray,isGOFSpecified);
    if (retcode != STATUS_GOOD)
      return retcode;

    // If only column-level privileges were specified, no problem.
    if (privsList.empty())
    {
      log (__FILE__, "****** REVOKE operation succeeded ******", -1);
      return STATUS_GOOD;
    }
  }

  // Convert the privsList into a PrivMgrDesc
  // convertPrivsToDesc sets up any errors in the diags area

  // revokeWGOWithPriv and isGOFSpecified interaction:
  //   isGOFSpecified is true if only GRANT OPTION FOR is being revoked.
  //     The privilege will still be available but the user can no longer
  //     grant the privilege to others.
  //   revokeWGOWithPriv is always set to true.  This means that both the
  //     priv and wgo are revoked.  It does not make sense to revoke the priv
  //     and not the WITH GRANT OPTION.
  bool revokeWGOWithPriv = true;
  PrivMgrDesc privsToRevoke(granteeID);
  retcode = convertPrivsToDesc(objectType,
                               isAllSpecified,
                               revokeWGOWithPriv,
                               isGOFSpecified,
                               privsList,
                               privsToRevoke);
  if (retcode != STATUS_GOOD)
    return retcode;

  // get all privilege descriptors for the object
  if (generateObjectRowList() == STATUS_ERROR)
    return STATUS_ERROR;

  // get privileges for the grantor and make sure the grantor can revoke
  // at least one of the requested privileges
  PrivMgrDesc privsOfTheGrantor(grantorID_);
  bool hasManagePrivileges;
  retcode = getUserPrivs(objectType, grantorID_, roleIDs, privsOfTheGrantor,
                         hasManagePrivileges, NULL );
  if (retcode != STATUS_GOOD)
    return retcode;

  // If null, the grantor has no privileges
  if ( privsOfTheGrantor.isNull() )
  {
    *pDiags_ << DgSqlCode (-CAT_PRIVILEGE_NOT_REVOKED);
    return STATUS_ERROR;
  }

  // Remove any privsToRevoke which are not held grantable by the Grantor.
  // If limitToGrantable returns true, some privs are not revokable.
bool warnNotAll = false; if ( privsToRevoke.limitToGrantable( privsOfTheGrantor ) ) { if ( isAllSpecified ) { // This is ok. Can specify ALL without having all privileges set. } else warnNotAll = true; // Not all the specified privs can be revoked } // If nothing left to revoke, we are done. if ( privsToRevoke.isNull() ) { *pDiags_ << DgSqlCode (-CAT_PRIVILEGE_NOT_REVOKED); return STATUS_ERROR; } // See if grantor has previously granted privileges to the grantee ObjectPrivsMDRow row; retcode = getGrantedPrivs(granteeID, row); if (retcode == STATUS_NOTFOUND) { // Set up parameters for the error message: privileges, grantor, & grantee // privilege list std::string privListStr; for (size_t i = 0; i < privsList.size(); i++) privListStr += PrivMgrUserPrivs::convertPrivTypeToLiteral(privsList[i]) + ", "; // Remove the last ", " privListStr.erase(privListStr.length()-2, privListStr.length()); if (isGOFSpecified) privListStr += " WITH GRANT OPTION"; *pDiags_ << DgSqlCode (CAT_GRANT_NOT_FOUND) << DgString0 (privListStr.c_str()) << DgString1 (grantorName.c_str()) <<DgString2 (granteeName.c_str()); return STATUS_WARNING; } if (retcode != STATUS_GOOD) return retcode; // if privileges exist, set currentPrivs to existing list // save a copy of the original privs PrivMgrCoreDesc currentPrivs; // creates an empty descriptor PrivMgrCoreDesc tempPrivs(row.privsBitmap_, row.grantableBitmap_); currentPrivs = tempPrivs; PrivMgrCoreDesc savedOriginalPrivs = currentPrivs; // TDB: if user privs have already been revoked, just return // save the privsToRevoke for query invalidation(QI) later PrivMgrDesc listOfRevokedPrivileges = privsToRevoke; // merge requested changes with existing row // First flip privsToRevoke to turn off the privilege and then union // the current privs with the privsToRevoke to generate the final bitmaps privsToRevoke.complement(); privsToRevoke.intersectionOfPrivs(currentPrivs); row.privsBitmap_ = privsToRevoke.getTablePrivs().getPrivBitmap(); row.grantableBitmap_ = privsToRevoke.getTablePrivs().getWgoBitmap(); // See if there are any dependencies that need to be removed before // removing the privilege ObjectUsage objectUsage; objectUsage.objectUID = objectUID_; objectUsage.objectOwner = granteeID; objectUsage.objectName = row.objectName_; objectUsage.objectType = row.objectType_; PrivMgrDesc originalPrivs (row.granteeID_); originalPrivs.setTablePrivs(savedOriginalPrivs); objectUsage.originalPrivs = originalPrivs; objectUsage.updatedPrivs = privsToRevoke; // get list of dependent objects that need to change std::vector<ObjectUsage *> listOfObjects; retcode = getAffectedObjects(objectUsage, PrivCommand::REVOKE_OBJECT_RESTRICT, listOfObjects); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } char buf[1000]; ObjectPrivsMDTable objectPrivsTable (objectTableName_, pDiags_); ColumnPrivsMDTable columnPrivsTable(columnTableName_,pDiags_); // update the OBJECT_PRIVILEGES row for each effected object for (size_t i = 0; i < listOfObjects.size(); i++) { ObjectUsage *pObj = listOfObjects[i]; PrivMgrCoreDesc thePrivs = pObj->updatedPrivs.getTablePrivs(); int32_t theGrantor = grantorID_; int32_t theGrantee = pObj->objectOwner; int64_t theUID = pObj->objectUID; sprintf(buf, "where grantee_id = %d and grantor_id = %d and object_uid = %ld", theGrantee, theGrantor, theUID); std::string whereClause (buf); if (thePrivs.isNull()) { pObj->describe(traceMsg); traceMsg.insert (0, "deleted object usage "); // delete the row retcode = 
objectPrivsTable.deleteWhere(whereClause); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } // Delete any corresponding column-level privileges. retcode = columnPrivsTable.deleteWhere(whereClause); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } } else { sprintf(buf, "set privileges_bitmap = %ld, grantable_bitmap = %ld", thePrivs.getPrivBitmap().to_ulong(), thePrivs.getWgoBitmap().to_ulong()); std::string setClause (buf); pObj->describe(traceMsg); traceMsg.insert (0, "updated object usage "); // update the row retcode = objectPrivsTable.updateWhere(setClause, whereClause); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } // Update any corresponding column-level privileges. retcode = columnPrivsTable.updateWhere(setClause,whereClause); if (retcode == STATUS_ERROR) { deleteListOfAffectedObjects(listOfObjects); return retcode; } } } deleteListOfAffectedObjects(listOfObjects); // Go rebuild the privilege tree to see if it is broken // If it is broken, return an error if (checkRevokeRestrict (row, objectRowList_)) return STATUS_ERROR; // Send a message to the Trafodion RMS process about the revoke operation. // RMS will contact all master executors and ask that cached privilege // information be re-calculated retcode = sendSecurityKeysToRMS(granteeID, listOfRevokedPrivileges); // SQL Ansi states that privileges that can be revoked should be done so // even if some requested privilege are not revokable. // TDB: report which privileges were not revoked if (warnNotAll) *pDiags_ << DgSqlCode(CAT_NOT_ALL_PRIVILEGES_REVOKED); log (__FILE__, "****** REVOKE operation succeeded ******", -1); return retcode; } // ***************************************************************************** // * Method: revokeObjectPriv // * // * Deletes rows in the OBJECT_PRIVILEGES table associated with the object // * This code assumes that all dependent and referencing objects such as // * views have been (or will be) dropped. No extra checks are performed. // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were revoked // * *: Unable to revoke privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::revokeObjectPriv () { PrivStatus retcode = STATUS_GOOD; if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for revoke command"); return STATUS_ERROR; } char buf[100]; sprintf(buf, "where object_uid = %ld", objectUID_); std::string whereClause = buf; // delete all the rows for this object ObjectPrivsMDTable objectPrivsTable (objectTableName_, pDiags_); retcode = objectPrivsTable.deleteWhere(whereClause); return retcode; } // ---------------------------------------------------------------------------- // method: checkRevokeRestrict // // This method starts at the beginning of the privilege tree and rebuilds // it from top to bottom. If the revoke causes part of the tree to be // unaccessible (a broken branch), it returns true; otherwise, revoke can // proceed - returns false. // // Params: // rowIn - the row containing proposed changes from the requested // revoke statement. 
//   rowList - a list of all the rows associated with the object
//
// Returns:
//   true - unable to perform revoke because of dependencies
//   false - able to perform revoke
//
// The diags area is set up indicating where the tree was broken
// ----------------------------------------------------------------------------
bool PrivMgrPrivileges::checkRevokeRestrict (
  PrivMgrMDRow &rowIn,
  std::vector <PrivMgrMDRow *> &rowList )
{
  // Search the list of privileges associated with the object and replace
  // the bitmaps of the current row with the bitmaps of the row sent in (rowIn).
  // At the same time, clear visited_ and set current_ to row values
  ObjectPrivsMDRow updatedRow = static_cast<ObjectPrivsMDRow &>(rowIn);
  std::string traceMsg;
  log (__FILE__, "checking grant tree for broken branches", -1);

  for (size_t i = 0; i < rowList.size(); i++)
  {
    // if rowIn matches this row, then update the bitmaps to use the
    // updated bitmaps
    ObjectPrivsMDRow &currentRow = static_cast<ObjectPrivsMDRow &> (*rowList[i]);
    if (updatedRow.granteeID_ == currentRow.granteeID_ &&
        updatedRow.grantorID_ == currentRow.grantorID_ )
    {
      currentRow.privsBitmap_ = updatedRow.privsBitmap_;
      currentRow.grantableBitmap_ = updatedRow.grantableBitmap_;
    }
    // reset visited_ and current_ PrivMgrCoreDesc
    currentRow.clearVisited();
    currentRow.setToOriginal();
  }

  // Reconstruct the privilege tree
  // Each privilege tree starts with the root - system grantor (-2)
  for ( size_t i = 0; i < NBR_OF_PRIVS; i++ )
  {
    PrivType pType = PrivType(i);

    int32_t systemGrantor = SYSTEM_AUTH_ID;
    scanObjectBranch (pType, systemGrantor, rowList);

    // TDB - add a scan for column privileges
  }

  // If a branch of the tree was not visited, then we have a broken
  // tree.  Therefore, revoke restrict will leave abandoned privileges;
  // in this case, return true.
  bool notVisited = false;
  for (size_t i = 0; i < rowList.size(); i++)
  {
    ObjectPrivsMDRow &currentRow = static_cast<ObjectPrivsMDRow &> (*rowList[i]);
    currentRow.describeRow(traceMsg);
    log (__FILE__, traceMsg, i);

    if (currentRow.anyNotVisited())
    {
      *pDiags_ << DgSqlCode(-CAT_DEPENDENT_PRIV_EXISTS)
               << DgString0(currentRow.grantorName_.c_str())
               << DgString1(currentRow.granteeName_.c_str());

      log (__FILE__, "found a branch that is not accessible", -1);
      notVisited = true;
      break;
    }
  }
  return notVisited;
}

// ----------------------------------------------------------------------------
// method: scanObjectBranch
//
// scans the privsList entries for match on Grantor,
// keeping track of which priv/wgo entries have been encountered
// by setting "visited" flag in the entry.
//
// For each entry discovered, set visited flag to indicate that
// priv and wgo were seen.  For wgo, if the wgo visited flag has not
// already been set, call scanObjectBranch recursively with this grantee
// as grantor.  (By observing the state of the wgo visited flag
// we avoid redundantly exploring the sub-tree rooted in a grantor
// which has already been discovered as having wgo from some other
// ancestor grantor.)
//
// This algorithm produces a depth-first scan of all nodes of the
// directed graph of privilege settings which can currently be reached
// by an uninterrupted chain of wgo values.
//
// The implementation is dependent on the fact that PrivsList
// entries are ordered by Grantor, Grantee, and within each of these
// by Primary uid value, type.  Entries for system grantor (_SYSTEM) are the
// first entries in the list.
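//
// Illustrative example (auth IDs are hypothetical): if _SYSTEM granted a
// privilege WGO to user 1001, 1001 granted it WGO to 1002, and 1002 granted
// it to 1003, the scan starts with _SYSTEM as grantor, marks 1001 visited,
// recurses with 1001 as grantor to mark 1002, and recurses again to mark
// 1003.  A row whose grantor can no longer be reached this way is left
// unvisited and is reported as a broken branch by checkRevokeRestrict.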
// // ----------------------------------------------------------------------------- void PrivMgrPrivileges::scanObjectBranch( const PrivType pType, // in const int32_t& grantor, // in const std::vector<PrivMgrMDRow *> & privsList ) // in { // The PrivMgrMDRow <list> is maintained in order by // Grantee within Grantor value - through an order by clause. // Skip over Grantors lower than the specified one. size_t i = 0; while ( i < privsList.size() ) { ObjectPrivsMDRow &currentRow = static_cast<ObjectPrivsMDRow &> (*privsList[i]); if (currentRow.grantorID_ < grantor) i++; else break; } // For matching Grantor, process each Grantee. while ( i < privsList.size() ) { ObjectPrivsMDRow &privEntry = static_cast<ObjectPrivsMDRow &> (*privsList[i]); if (privEntry.grantorID_ == grantor) { PrivMgrCoreDesc& current = privEntry.accessCurrent(); if ( current.getPriv(pType) ) { // This grantee has priv. Set corresponding visited flag. PrivMgrCoreDesc& visited = privEntry.accessVisited(); visited.setPriv(pType, true); if ( current.getWgo(pType)) { // This grantee has wgo. if ( visited.getWgo(pType) ) { // Already processed this subtree. } else { visited.setWgo(pType, true); int32_t thisGrantee( privEntry.granteeID_ ); if ( ComUser::isPublicUserID(thisGrantee) ) scanPublic( pType, // Deal with PUBLIC grantee wgo. privsList ); else { int32_t granteeAsGrantor(thisGrantee); scanObjectBranch( pType, // Scan for this grantee as grantor. granteeAsGrantor, privsList ); } } } // end this grantee has wgo } // end this grantee has this priv i++; // on to next privsList entry } else break; } // end scan privsList over Grantees for this Grantor } // end scanCurrent /* ******************************************************************* scanPublic -- a grant wgo to PUBLIC has been encountered for the current privilege type, so *all* users are able to grant this privilege. Scan the privsList for all grantees who have this priv from any grantor, marking each such entry as visited. ****************************************************************** */ void PrivMgrPrivileges::scanPublic( const PrivType pType, // in const std::vector<PrivMgrMDRow *>& privsList ) // in { // PUBLIC has a priv wgo. So *every* grant of this priv // is allowed, by any Grantor. for ( size_t i = 0; i < privsList.size(); i++ ) { ObjectPrivsMDRow &privEntry = static_cast<ObjectPrivsMDRow &> (*privsList[i]); const PrivMgrCoreDesc& current = privEntry.accessCurrent(); if ( current.getPriv(pType) ) { // This grantee has priv. Set corresponding visited flag. PrivMgrCoreDesc& visited = privEntry.accessVisited(); visited.setPriv(pType, true); if ( current.getWgo(pType) ) visited.setWgo(pType, true); } } // end scan privsList over all Grantees/Grantors } // end scanPublic // **************************************************************************** // method: sendSecurityKeysToRMS // // This method generates a security key for each privilege revoked for the // grantee. It then makes a cli call sending the keys. // SQL_EXEC_SetSecInvalidKeys will send the security keys to RMS and RMS // sends then to all the master executors. The master executors check this // list and recompile any queries to recheck privileges. // // input: // granteeID - the UID of the user losing privileges // the granteeID is stored in the PrivMgrDesc class - extra? // listOfRevokePrivileges - the list of privileges that were revoked // // Returns: PrivStatus // // STATUS_GOOD: Privileges were granted // *: Unable to send keys, see diags. 
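//
// Sketch of one entry in the key array passed to the CLI (values shown are
// purely illustrative):
//   siKeyList[j].revokeKey.subject = <hash of the grantee authorization ID>
//   siKeyList[j].revokeKey.object  = <hash of the object UID>
//   siKeyList[j].operation         = <two-character action literal returned
//                                     by ComSecurityKey::getSecurityKeyTypeAsLit>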
// **************************************************************************** PrivStatus PrivMgrPrivileges::sendSecurityKeysToRMS( const int32_t granteeID, const PrivMgrDesc &listOfRevokedPrivileges) { // Go through the list of table privileges and generate SQL_QIKEYs #if 0 // Only need to generate keys for SELECT, INSERT, UPDATE, and DELETE std::vector<ComSecurityKey *> keyList; PrivMgrCoreDesc privs = listOfRevokedPrivileges.getTablePrivs(); for ( size_t i = 0; i < NBR_OF_PRIVS; i++ ) { PrivType pType = PrivType(i); if (pType == SELECT_PRIV || pType == INSERT_PRIV || pType == UPDATE_PRIV || pType == DELETE_PRIV) { if (privs.getPriv(pType)) { ComSecurityKey *key = new ComSecurityKey(granteeID, objectUID_, pType, ComSecurityKey::OBJECT_IS_OBJECT); if (key->isValid()) keyList.push_back(key); else { // Probably should report a different error. Is an error possible? *pDiags_ << DgSqlCode (-CAT_NOT_AUTHORIZED); return STATUS_ERROR; } } } } #endif std::vector<ComSecurityKey *> keyList; PrivMgrCoreDesc privs = listOfRevokedPrivileges.getTablePrivs(); PrivStatus privStatus = buildSecurityKeys(granteeID,privs,keyList); if (privStatus != STATUS_GOOD) return privStatus; // TDB: add column privileges // Create an array of SQL_QIKEYs int32_t numKeys = keyList.size(); SQL_QIKEY siKeyList[numKeys]; for (size_t j = 0; j < keyList.size(); j++) { ComSecurityKey *pKey = keyList[j]; siKeyList[j].revokeKey.subject = pKey->getSubjectHashValue(); siKeyList[j].revokeKey.object = pKey->getObjectHashValue(); std::string actionString; pKey->getSecurityKeyTypeAsLit(actionString); strncpy(siKeyList[j].operation, actionString.c_str(), 2); } // delete the security list for(size_t k = 0; k < keyList.size(); k++) delete keyList[k]; keyList.clear(); // Call the CLI to send details to RMS SQL_EXEC_SetSecInvalidKeys(numKeys, siKeyList); return STATUS_GOOD; } // ***************************************************************************** // * Method: populateObjectPriv // * // * Inserts rows into the OBJECT_PRIVILEGES table during initialization to // * reflect object owner privileges // * // * Parameters: // * // * <objectLocation> the location of the Trafodion OBJECTS table which is // * used to extract all the objects // * <authsLocation> the location of the Trafodion AUTHS table which is used // * to map owner IDs to grantees // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were inserted // * *: Unable to insert privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::populateObjectPriv( const std::string &objectsLocation, const std::string &authsLocation) { // bug - sometimes, if don't wait, the insert command // does not find rows to insert //sleep(60); ObjectPrivsMDTable objectPrivsTable(objectTableName_, pDiags_); return objectPrivsTable.insertSelect(objectsLocation, authsLocation); } // ***************************************************************************** // * Method: getPrivBitmaps // * // * Reads the OBJECT_PRIVILEGES table to get the privilege bitmaps for // * rows matching a where clause. // * // * Parameters: // * // * <whereClause> specifies the rows to be returned // * <orderByClause> specifies the order of the rows to be returned // * <privBitmaps> passes back a vector of bitmaps. // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were returned // * *: Unable to read privileges, see diags. 
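// *
// * Example call (clause values are illustrative only):
// *   std::vector<PrivObjectBitmap> bitmaps;
// *   getPrivBitmaps(" WHERE object_uid = 12345 ",
// *                  " ORDER BY grantor_id ",
// *                  bitmaps);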
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getPrivBitmaps( const std::string & whereClause, const std::string & orderByClause, std::vector<PrivObjectBitmap> & privBitmaps) { std::vector<PrivMgrMDRow *> rowList; ObjectPrivsMDTable objectPrivsTable(objectTableName_,pDiags_); PrivStatus privStatus = objectPrivsTable.selectWhere(whereClause, orderByClause,rowList); if (privStatus != STATUS_GOOD) { deleteRowList(rowList); return privStatus; } for (size_t r = 0; r < rowList.size(); r++) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &>(*rowList[r]); privBitmaps.push_back(row.privsBitmap_); } deleteRowList(rowList); return STATUS_GOOD; } // ***************************************************************************** // * Method: getPrivTextForObject // * // * returns GRANT statements describing all the privileges that have been // * granted on the object // * // * Parameters: // * // * <objectInfo> Metadata details for object. // * <privilegeText> The resultant GRANT statement(s) // * // * Returns: PrivStatus // * // * STATUS_GOOD : Grants were found // * STATUS_NOTFOUND: No grants were found // * *: Unable to insert privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getPrivTextForObject( const PrivMgrObjectInfo & objectInfo, std::string & privilegeText) { PrivStatus retcode = STATUS_GOOD; if (objectUID_ == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for describe privileges command"); return STATUS_ERROR; } // generate the list of privileges granted to the object and store in class if (generateObjectRowList() == STATUS_ERROR) return STATUS_ERROR; if (generateColumnRowList() == STATUS_ERROR) return STATUS_ERROR; // No failures possible for objects, all information in rowList. buildPrivText(objectRowList_,objectInfo,PrivLevel::OBJECT,pDiags_,privilegeText); // build text for columns retcode = buildPrivText(columnRowList_,objectInfo,PrivLevel::COLUMN, pDiags_,privilegeText); return retcode; } // ***************************************************************************** // * Method: getPrivsOnObjectForUser // * // * returns privileges granted to the requested user for the requested // * object // * // * Parameters: // * // * <objectUID> identifies the object // * <userID> identifies the user // * <userPrivs> the list of privileges is returned // * <grantablePrivs> the list of grantable privileges is returned // * <colPrivsList> the list of column-level privileges is returned // * <colGrantableList> the list of grantable column-level privileges is returned // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were gathered // * *: Unable to gather privileges, see diags. 
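// *
// * Illustrative result (grants are hypothetical): if the user holds SELECT
// * at the object level and UPDATE on a single column, userPrivs has the
// * SELECT bit set, colPrivsList maps that column to a bitmap with UPDATE
// * set, and grantablePrivs/colGrantableList remain empty unless the
// * corresponding WITH GRANT OPTION bits were also granted.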
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getPrivsOnObjectForUser( const int64_t objectUID, ComObjectType objectType, const int32_t userID, PrivObjectBitmap &userPrivs, PrivObjectBitmap &grantablePrivs, PrivColList & colPrivsList, PrivColList & colGrantableList, std::vector <ComSecurityKey *>* secKeySet) { PrivStatus retcode = STATUS_GOOD; objectUID_ = objectUID; if (objectUID == 0) { PRIVMGR_INTERNAL_ERROR("objectUID is 0 for get privileges command"); return STATUS_ERROR; } // generate the list of privileges granted to the object and store in class if (generateObjectRowList() == STATUS_ERROR) return STATUS_ERROR; objectUID_ = objectUID; PrivMgrDesc privsOfTheUser(userID); bool hasManagePrivileges = false; std::vector<int32_t> roleIDs; retcode = getRoleIDsForUserID(userID,roleIDs); if (retcode == STATUS_ERROR) return retcode; retcode = getUserPrivs(objectType, userID, roleIDs, privsOfTheUser, hasManagePrivileges, secKeySet); if (retcode != STATUS_GOOD) return retcode; if (hasManagePrivileges && hasAllDMLPrivs(objectType,privsOfTheUser.getTablePrivs().getPrivBitmap())) { userPrivs = privsOfTheUser.getTablePrivs().getPrivBitmap(); grantablePrivs = userPrivs; return STATUS_GOOD; } // generate the list of column-level privileges granted to the object and store in class if (generateColumnRowList() == STATUS_ERROR) return STATUS_ERROR; retcode = getColPrivsForUser(userID,roleIDs,colPrivsList,colGrantableList,secKeySet); if (retcode != STATUS_GOOD) return retcode; userPrivs = privsOfTheUser.getTablePrivs().getPrivBitmap(); if (hasManagePrivileges) grantablePrivs = userPrivs; else grantablePrivs = privsOfTheUser.getTablePrivs().getWgoBitmap(); return retcode; } // ***************************************************************************** // * Method: getRoleIDsForUserID // * // * Returns the roleIDs for the roles granted to the user. // * // * Parameters: // * // * <userID> is the unique identifier for the user // * <roleIDs> passes back the list (potentially empty) of roles granted to the user // * // * Returns: PrivStatus // * // * STATUS_GOOD: Role list returned // * *: Unable to fetch granted roles, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getRoleIDsForUserID( int32_t userID, std::vector<int32_t> & roleIDs) { PrivStatus retcode = STATUS_GOOD; PrivMgrRoles roles(" ",metadataLocation_,pDiags_); std::vector<std::string> roleNames; std::vector<int32_t> roleDepths; retcode = roles.fetchRolesForUser(userID,roleNames,roleIDs,roleDepths); return retcode; } //*************** End of PrivMgrPrivileges::getRoleIDsForUserID **************** // ***************************************************************************** // * Method: getUserPrivs // * // * Accumulates privileges for a user summarized over all grantors // * including PUBLIC // * // * Parameters: // * // * <objectType> is the type of the subject object. // * <granteeID> specifies the userID to accumulate // * <roleIDs> specifies a list of roles granted to the grantee // * <summarizedPrivs> contains the summarized privileges // * <hasManagePrivileges> returns whether the grantee has MANAGE_PRIVILEGES authority // * <secKeySet> if not NULL, returns a set of keys for user // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were gathered // * *: Unable to gather privileges, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getUserPrivs( ComObjectType objectType, const int32_t granteeID, const std::vector<int32_t> & roleIDs, PrivMgrDesc &summarizedPrivs, bool & hasManagePrivileges, std::vector <ComSecurityKey *>* secKeySet ) { PrivStatus retcode = STATUS_GOOD; PrivMgrDesc temp(granteeID); retcode = getPrivsFromAllGrantors( objectUID_, objectType, granteeID, roleIDs, temp, hasManagePrivileges, secKeySet ); if (retcode != STATUS_GOOD) return retcode; summarizedPrivs = temp; // TBD - set all column granted if the table level privilege is set return retcode; } // ***************************************************************************** // * Method: getPrivsFromAllGrantors // * // * Accumulates privileges for a specified userID // * Does the actual accumulation orchestrated by getUserPrivs // * // * Parameters: // * // * <objectUID> object to gather privileges for // * <objectType> is the type of the subject object. // * <granteeID> specifies the userID to accumulate // * <roleIDs> is vector of roleIDs granted to the grantee // * <hasManagePrivileges> returns whether the grantee has MANAGE_PRIVILEGES authority // * <summarizedPrivs> contains the summarized privileges // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were accumulated // * *: Unable to accumulate privileges, see diags. // * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getPrivsFromAllGrantors( const int64_t objectUID, ComObjectType objectType, const int32_t granteeID, const std::vector<int32_t> & roleIDs, PrivMgrDesc &summarizedPrivs, bool & hasManagePrivileges, std::vector <ComSecurityKey *>* secKeySet ) { PrivStatus retcode = STATUS_GOOD; hasManagePrivileges = false; // Check to see if the granteeID is the system user // if so, the system user has all privileges. 
Set up appropriately if (ComUser::isSystemUserID(granteeID)) { PrivObjectBitmap bitmap; bitmap.set(); PrivMgrCoreDesc coreTablePrivs(bitmap, bitmap); summarizedPrivs.setTablePrivs(coreTablePrivs); hasManagePrivileges = true; return STATUS_GOOD; } PrivObjectBitmap systemPrivs; PrivMgrComponentPrivileges componentPrivileges(metadataLocation_,pDiags_); componentPrivileges.getSQLDMLPrivileges(granteeID,roleIDs,systemPrivs, hasManagePrivileges); if (hasManagePrivileges && hasAllDMLPrivs(objectType,systemPrivs)) { PrivMgrCoreDesc coreTablePrivs(systemPrivs,systemPrivs); summarizedPrivs.setTablePrivs(coreTablePrivs); return STATUS_GOOD; } std::vector<PrivMgrMDRow *> rowList; retcode = getRowsForGrantee(objectUID, granteeID, true, roleIDs, rowList, secKeySet); if (retcode == STATUS_ERROR) return retcode; // Get the privileges for the object granted to the grantee PrivMgrCoreDesc coreTablePrivs; for (int32_t i = 0; i < rowList.size();++i) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*rowList[i]); if (secKeySet != NULL) { PrivMgrCoreDesc privs(row.privsBitmap_,0); retcode = buildSecurityKeys(row.granteeID_,privs,*secKeySet); if (retcode != STATUS_GOOD) return retcode; } PrivMgrCoreDesc temp (row.privsBitmap_, row.grantableBitmap_); coreTablePrivs.unionOfPrivs(temp); } PrivObjectBitmap grantableBitmap; if (hasManagePrivileges) grantableBitmap = systemPrivs; PrivMgrCoreDesc temp2(systemPrivs,grantableBitmap); coreTablePrivs.unionOfPrivs(temp2); summarizedPrivs.setTablePrivs(coreTablePrivs); return STATUS_GOOD; } // ***************************************************************************** // * Method: getRowsForGrantee // * // * Reads OBJECT_PRIVILEGES table to obtain all privileges granted to the // * specified granteeID for the object (objectUID) // * // * Parameters: // * // * <objectUID> object to gather privileges for // * <granteeID> specifies the userID to gather privileges // * <roleIDs> the list of roles granted to the userID // * <isObjectTable> true if OBJECT_PRIVILEGES table // * <rowList> returns the list of granted privileges as a vector list // * consisiting of the grantor, grantee, and privileges for the object // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were retrieved // * *: Unable to retrieve privileges, see diags. 
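// *
// * Note: rows are returned not only for the grantee itself but also for
// * any role granted to the grantee and for PUBLIC, since privileges from
// * all of these sources apply to the user.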
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::getRowsForGrantee( const int64_t objectUID, const int32_t granteeID, const bool isObjectTable, const std::vector<int_32> &roleIDs, std::vector<PrivMgrMDRow *> &rowList, std::vector <ComSecurityKey *>* secKeySet) { PrivStatus retcode = STATUS_GOOD; #if 0 if (isObjectTable) { if (objectRowList_.size() == 0) { PRIVMGR_INTERNAL_ERROR("privilege list for object has not been created"); return STATUS_ERROR; } } else // isColumnTable { if (columnRowList_.size() == 0) { PRIVMGR_INTERNAL_ERROR("privilege list for columns have not been created"); return STATUS_ERROR; } } #endif // create the list of row pointers from the cached list std::vector<int32_t> authIDs = roleIDs; authIDs.push_back(granteeID); authIDs.push_back(PUBLIC_AUTH_ID); std::vector<int32_t>::iterator it; std::vector<PrivMgrMDRow *> privRowList; if (isObjectTable) privRowList = objectRowList_; else privRowList = columnRowList_; for (size_t i = 0; i < privRowList.size(); i++) { if (isObjectTable) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*privRowList[i]); it = std::find(authIDs.begin(), authIDs.end(), row.granteeID_); } else { ColumnPrivsMDRow &row = static_cast<ColumnPrivsMDRow &> (*privRowList[i]); it = std::find(authIDs.begin(), authIDs.end(), row.granteeID_); } if (it != authIDs.end()) rowList.push_back(privRowList[i]); } if (secKeySet != NULL) { retcode = buildUserSecurityKeys(roleIDs,granteeID,*secKeySet); if (retcode == STATUS_ERROR) { PRIVMGR_INTERNAL_ERROR("Unable to build user security key"); return STATUS_ERROR; } } return STATUS_GOOD; } // ***************************************************************************** // * Method: summarizeCurrentAndOriginalPrivs // * // * Accumulates privileges for a specified object and grantee // * // * Parameters: // * // * <objectUID> object to summarize privileges for // * <granteeID> specifies the userID to accumulate // * <roleIDs> the list of roles granted to the userID // * <summarizedOriginalPrivs> contains the original summarized privileges // * <summarizedCurrentPrivs> contains the current summarized privileges // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were summarized // * *: Unable to summarize privileges, see diags. 
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::summarizeCurrentAndOriginalPrivs( const int64_t objectUID, const int32_t granteeID, const std::vector<int32_t> & roleIDs, const std::vector<ObjectUsage *> listOfChangedPrivs, PrivMgrDesc &summarizedOriginalPrivs, PrivMgrDesc &summarizedCurrentPrivs) { PrivStatus retcode = STATUS_GOOD; // get OBJECT_PRIVILEGES rows where the grantee has received privileges std::vector<PrivMgrMDRow *> rowList; retcode = getRowsForGrantee(objectUID, granteeID, true, roleIDs, rowList, NULL); // rowList contains the original privileges, // listOfChangedPrivs contains any updates to privileges // go through the list and summarize the original and current privileges // We do a union operation to capture privileges from all grantors for (int32_t i = 0; i < rowList.size();++i) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*rowList[i]); PrivMgrCoreDesc original(row.privsBitmap_, row.grantableBitmap_); PrivMgrCoreDesc current = original; for (size_t j = 0; j < listOfChangedPrivs.size(); j++) { ObjectUsage *pObj = listOfChangedPrivs[j]; if (pObj->objectUID == row.objectUID_ && grantorID_ == row.grantorID_ && pObj->objectOwner == row.granteeID_ ) { current = pObj->updatedPrivs.getTablePrivs(); } } summarizedOriginalPrivs.unionOfPrivs(original); summarizedCurrentPrivs.unionOfPrivs(current); } return STATUS_GOOD; } // ***************************************************************************** // * Method: isAuthIDGrantedPrivs // * // * Determines if the specified authorization ID has been granted one or // * more object privileges. // * // * Parameters: // * // * <authID> identifies the user or role. // * // * Returns: bool // * // * true: Authorization ID has been granted one or more object privileges. // * false: Authorization ID has not been granted any object privileges. // * // ***************************************************************************** bool PrivMgrPrivileges::isAuthIDGrantedPrivs(const int32_t authID) { std::string whereClause(" WHERE GRANTEE_ID = "); char authIDString[20]; sprintf(authIDString,"%d",authID); whereClause += authIDString; // set pointer in diags area int32_t diagsMark = pDiags_->mark(); int64_t rowCount = 0; ObjectPrivsMDTable myTable(objectTableName_,pDiags_); PrivStatus privStatus = myTable.selectCountWhere(whereClause,rowCount); if ((privStatus == STATUS_GOOD || privStatus == STATUS_WARNING) && rowCount > 0) return true; pDiags_->rewind(diagsMark); return false; } // ***************************************************************************** // * Method: convertPrivsToDesc // * // * Converts the list of requested privileges into a PrivMgrDesc // * This code also checks for duplicate entries in the privilege list // * and that the list of privileges is compatible with the object type. // * // * Parameters: // * // * <objectType> type of object // * <isAllSpecified> if true then all privileges valid for the object // * type will be revoked // * <isWGOSpecified> if true then remove the ability for the grantee // * to revoke the set of privileges to other grantees // * <privsList> is the list of privileges to check // * <PrivMgrCoreDesc> the core descriptor containing privileges // * // * Returns: PrivStatus // * // * STATUS_GOOD: Privileges were inserted // * *: Unable to insert privileges, see diags. 
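// *
// * Illustrative example (inputs are hypothetical): for a base table with
// * privsList = {SELECT_PRIV, INSERT_PRIV} and isWgoSpecified = true, the
// * returned descriptor has SELECT and INSERT set in both the privilege and
// * grantable bitmaps; requesting USAGE_PRIV on the same table instead
// * raises CAT_PRIVILEGE_NOT_ALLOWED_FOR_THIS_OBJECT_TYPE.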
// * // ***************************************************************************** PrivStatus PrivMgrPrivileges::convertPrivsToDesc( const ComObjectType objectType, const bool isAllSpecified, const bool isWgoSpecified, const bool isGOFSpecified, const std::vector<PrivType> privsList, PrivMgrDesc &privsToProcess) { // Categorize the objectType bool isLibrary = false; bool isUdr = false; bool isObject = false; bool isSequence = false; switch (objectType) { case COM_BASE_TABLE_OBJECT: case COM_VIEW_OBJECT: isObject = true; break; case COM_LIBRARY_OBJECT: isLibrary = true; break; case COM_USER_DEFINED_ROUTINE_OBJECT: isUdr = true; break; case COM_SEQUENCE_GENERATOR_OBJECT: isSequence = true; break; default: { char objectTypeLit[3] = {0}; strncpy(objectTypeLit,ObjectEnumToLit(objectType),2); *pDiags_ << DgSqlCode(-4219) << DgString1(objectTypeLit); return STATUS_ERROR; } } // If all is specified, set bits appropriate for the object type and return if (isAllSpecified) { if (isLibrary) privsToProcess.setAllLibraryGrantPrivileges(isWgoSpecified); else if (isUdr) privsToProcess.setAllUdrGrantPrivileges(isWgoSpecified); else if (isSequence) privsToProcess.setAllSequenceGrantPrivileges(isWgoSpecified); else privsToProcess.setAllTableGrantPrivileges(isWgoSpecified); return STATUS_GOOD; } PrivMgrCoreDesc tableCorePrivs; // For each privilege specified in the privsList: // make sure it is not a duplicate // make sure it is appropriate for the objectType bool isIncompatible = false; for (int32_t i = 0; i < privsList.size();++i) { switch (privsList[i]) { case EXECUTE_PRIV: if (!isUdr) isIncompatible = true; else tableCorePrivs.testAndSetBit(privsList[i],isWgoSpecified,isGOFSpecified); break; case DELETE_PRIV: case INSERT_PRIV: case REFERENCES_PRIV: case SELECT_PRIV: if (!isObject) isIncompatible = true; else tableCorePrivs.testAndSetBit(privsList[i],isWgoSpecified,isGOFSpecified); break; case UPDATE_PRIV: if (!isObject && !isLibrary) isIncompatible = true; else tableCorePrivs.testAndSetBit(privsList[i],isWgoSpecified,isGOFSpecified); break; case USAGE_PRIV: if (!isLibrary && !isSequence) isIncompatible = true; else tableCorePrivs.testAndSetBit(privsList[i],isWgoSpecified,isGOFSpecified); break; case ALL_DML: if (!isObject) isIncompatible = true; else if (isGOFSpecified) tableCorePrivs.setWgo(ALL_DML,true); else { tableCorePrivs.setPriv(ALL_DML,true); tableCorePrivs.setWgo(ALL_DML,isWgoSpecified); } break; default: { *pDiags_ << DgSqlCode(-CAT_INVALID_PRIVILEGE_FOR_GRANT_OR_REVOKE) << DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privsList[i]).c_str()); return STATUS_ERROR; } } // Report error if privilege is incompatible with objectType if (isIncompatible) { *pDiags_ << DgSqlCode(-CAT_PRIVILEGE_NOT_ALLOWED_FOR_THIS_OBJECT_TYPE) << DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privsList[i]).c_str()); return STATUS_ERROR; } } // end for privsToProcess.setTablePrivs(tableCorePrivs); return STATUS_GOOD; } // ***************************************************************************** // PrivMgrPrivileges.cpp static functions * // ***************************************************************************** // ***************************************************************************** // * Function: buildPrivText * // * * // * Builds priv portion of SHOWDDL output. 
* // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <rowList> const std::vector<PrivMgrMDRow *> In * // * is a list of rows describing the privileges granted. * // * * // * <objectInfo> const PrivMgrObjectInf & In * // * object details needed to create appropriate text * // * * // * <privLevel> PrivLevel In * // * is the privilege level, either OBJECT or COLUMN. * // * * // * <pDiags_> ComDiagsArea * In * // * is where to report an internal error. * // * * // * <privilegeText> std::string & Out * // * passes back the set of grant commands that describe the privileges on * // * the object. * // * * // ***************************************************************************** static PrivStatus buildPrivText( const std::vector<PrivMgrMDRow *> rowList, const PrivMgrObjectInfo & objectInfoIn, PrivLevel privLevel, ComDiagsArea * pDiags_, std::string & privilegeText) { PrivMgrObjectInfo objectInfo = (PrivMgrObjectInfo) objectInfoIn; // Build a grant statement for each grantor/grantee row returned. // TDB: If we support multiple grantees per grant statement, // this code can be improved std::string grantStmt; std::string grantWGOStmt; std::string objectText("ON "); // Append object type if not base table or view if (objectInfo.getObjectType() != COM_BASE_TABLE_OBJECT && objectInfo.getObjectType() != COM_VIEW_OBJECT) objectText += comObjectTypeName(objectInfo.getObjectType()); objectText += objectInfo.getObjectName() + " TO "; std::string lastGranteeName; int32_t lastGranteeID = 0; std::vector<std::string> privString; std::vector<std::string> WGOString; std::vector<bool> hasWGO; std::vector<bool> hasPriv; std::vector<std::string> columnNames; bool mergeStrings = false; // Note, this creates entries for DELETE and USAGE that are unused. if (privLevel == PrivLevel::COLUMN) for (size_t p = FIRST_DML_COL_PRIV; p <= LAST_DML_COL_PRIV; p++ ) { privString.push_back(PrivMgrUserPrivs::convertPrivTypeToLiteral((PrivType)p) + "("); WGOString.push_back(privString[p]); hasPriv.push_back(false); hasWGO.push_back(false); } for (int32_t i = 0; i < rowList.size();++i) { std::string objectGranteeText(objectText); std::string withoutWGO; std::string withWGO; int32_t grantorID = 0; std::string grantorName; if (privLevel == PrivLevel::OBJECT) { ObjectPrivsMDRow &row = static_cast<ObjectPrivsMDRow &> (*rowList[i]); grantorID = row.grantorID_; grantorName = row.grantorName_; PrivObjectBitmap privsBitmap = row.privsBitmap_; PrivObjectBitmap wgoBitmap = row.grantableBitmap_; bool delimited = isDelimited(row.granteeName_); if (delimited) objectGranteeText += "\""; objectGranteeText += row.granteeName_; if (delimited) objectGranteeText += "\""; for (size_t p = FIRST_DML_PRIV; p <= LAST_DML_PRIV; p++ ) if (privsBitmap.test(p)) { std::string privTypeString = PrivMgrUserPrivs::convertPrivTypeToLiteral((PrivType)p); if (wgoBitmap.test(p)) withWGO += privTypeString + ", "; else withoutWGO += privTypeString + ", "; } } else { ColumnPrivsMDRow &row = static_cast<ColumnPrivsMDRow &> (*rowList[i]); // For column-level privileges we are building a piece of the // output for each privilege on every loop. Privileges are stored // per column, but GRANT syntax accepts via a privilege and a // list of columns. For each privilege granted to a grantee, need // to list all the columns. Substrings are merged when the end of the // list of grants is reached or there is a new grantor or grantee. 
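// For example (column names are hypothetical), SELECT granted on columns
// C1 and C2 plus INSERT granted on C1 for the same grantor/grantee pair are
// merged into a single statement of the form:
//   GRANT SELECT(C1, C2), INSERT(C1) ON <object> TO <grantee>;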
if (i + 1 == rowList.size()) mergeStrings = true; else { ColumnPrivsMDRow &nextRow = static_cast<ColumnPrivsMDRow &> (*rowList[i + 1]); if (nextRow.grantorID_ != row.grantorID_ || nextRow.granteeID_ != row.granteeID_) mergeStrings = true; } grantorID = row.grantorID_; grantorName = row.grantorName_; PrivColumnBitmap privsBitmap = row.privsBitmap_; PrivColumnBitmap wgoBitmap = row.grantableBitmap_; // Get name of the grantee. If we have changed grantees, fetch the // name of the grantee. if (row.granteeID_ != lastGranteeID) { lastGranteeID = row.granteeID_; lastGranteeName = row.granteeName_; } bool delimited = isDelimited(lastGranteeName); if (delimited) objectGranteeText += "\""; objectGranteeText += lastGranteeName; if (delimited) objectGranteeText += "\""; // Get the column name for the row const std::vector<std::string> &columnList = objectInfo.getColumnList(); if (columnList.size() < row.columnOrdinal_) { std::string errorText("Unable to look up column name for column number "); errorText += PrivMgr::authIDToString(row.columnOrdinal_); PRIVMGR_INTERNAL_ERROR(errorText.c_str()); return STATUS_ERROR; } std::string columnName(columnList.at(row.columnOrdinal_)); // Build the list of columns granted for each privilege. WGOString // and privString have been pre-populated with PRIVNAME(. for (size_t p = FIRST_DML_COL_PRIV; p <= LAST_DML_COL_PRIV; p++ ) if (privsBitmap.test(p)) { if (wgoBitmap.test(p)) { WGOString[p] += columnName + ", "; hasWGO[p] = true; } else { privString[p] += columnName + ", "; hasPriv[p] = true; } } // Check if there are column priv substrings that need to be merged. if (mergeStrings) { for (size_t p = FIRST_DML_COL_PRIV; p <= LAST_DML_COL_PRIV; p++ ) { if (!isDMLPrivType(static_cast<PrivType>(p))) continue; if (hasWGO[p]) { closeColumnList(WGOString[p]); withWGO += WGOString[p]; // Reset to original value WGOString[p].assign(PrivMgrUserPrivs::convertPrivTypeToLiteral((PrivType)p) + "("); hasWGO[p] = false; } else if (hasPriv[p]) { closeColumnList(privString[p]); withoutWGO += privString[p]; // Reset to original value privString[p].assign(PrivMgrUserPrivs::convertPrivTypeToLiteral((PrivType)p) + "("); hasPriv[p] = false; } } mergeStrings = false; } }//End of PrivLevel::COLUMN if (!withoutWGO.empty()) buildGrantText(withoutWGO,objectGranteeText, grantorID,grantorName,false, objectInfo.getObjectOwner(),grantStmt); if (!withWGO.empty()) buildGrantText(withWGO,objectGranteeText, grantorID,grantorName,true, objectInfo.getObjectOwner(),grantWGOStmt); privilegeText += grantStmt + grantWGOStmt; grantStmt.clear(); grantWGOStmt.clear(); } return STATUS_GOOD; } //*************************** End of buildPrivText ***************************** // ***************************************************************************** // * Function: buildGrantText * // * * // * Builds GRANT statement. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <privText> const std::string & In * // * is the privileges granted. * // * * // * <objectGranteeText> const std::string & In * // * is the object the privileges are granted on and to whom. * // * * // * <grantorID> const int32_t In * // * is the ID of the authID who granted the privilege(s). If the system * // * (_SYSTEM) granted the privilege, the command is prefixed with "--" to * // * prevent execution in a playback script. * // * * // * <isWGO> bool In * // * if true, add the clause WITH GRANT OPTION to the command. 
* // * * // * <grantText> std::string & Out * // * passes back the grant command. * // * * // ***************************************************************************** void static buildGrantText( const std::string & privText, const std::string & objectGranteeText, const int32_t grantorID, const std::string grantorName, bool isWGO, const int32_t objectOwner, std::string & grantText) { if (grantorID == SYSTEM_AUTH_ID) grantText += "-- "; grantText += "GRANT "; grantText += privText; // remove last ',' size_t commaPos = grantText.find_last_of(","); if (commaPos != std::string::npos) grantText.replace(commaPos, 1, ""); grantText += objectGranteeText; if (isWGO) grantText += " WITH GRANT OPTION"; else if (grantorID != objectOwner && grantorID != SYSTEM_AUTH_ID) { grantText += " GRANTED BY "; bool delimited = isDelimited(grantorName); if (delimited) grantText += "\""; grantText += grantorName; if (delimited) grantText += "\""; } grantText += ";\n"; } //*************************** End of buildGrantText **************************** // ***************************************************************************** // * Function: buildColumnSecurityKeys * // * * // * Builds security keys for privileges granted on one or more columns of * // * an object. * // * * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * * // * <objectUID> const int64_t In * // * is the unique ID of the object. * // * * // * <roleIDs> const PrivColList & colPrivsList In * // * is a list of the column privileges granted on this object to the * // * specified grantee. * // * * // * <granteeID> const int32_t In * // * is the ID of the user granted the column privilege(s). * // * * // * <secKeySet> std::vector <ComSecurityKey *> & Out * // * passes back a list of SUBJECT_IS_OBJECT security keys for each of the * // * privileges granted on the object to the grantee. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Security keys were built * // * *: Security keys were not built, see diags. * // * * // ***************************************************************************** static PrivStatus buildColumnSecurityKeys( const int64_t objectUID, const PrivColList & colPrivsList, const int32_t granteeID, std::vector<ComSecurityKey *> & secKeySet) { // ***************************************************************************** // * * // * Optimizer currently does not support OBJECT_IS_COLUMN, so we combine * // * all column-level privileges to one priv bitmap and create a key for * // * each priv type the grantee has on the object. 
* // * * // ***************************************************************************** PrivColumnBitmap privBitmap; for (PrivColIterator columnIterator = colPrivsList.begin(); columnIterator != colPrivsList.end(); ++columnIterator) privBitmap |= columnIterator->second; for (size_t i = FIRST_DML_COL_PRIV; i <= LAST_DML_COL_PRIV; i++ ) { if (!privBitmap.test(PrivType(i))) continue; ComSecurityKey *key = new ComSecurityKey(granteeID, objectUID, PrivType(i), ComSecurityKey::OBJECT_IS_OBJECT); if (!key->isValid()) return STATUS_ERROR; secKeySet.push_back(key); } return STATUS_GOOD; } //********************** End of buildColumnSecurityKeys ************************ // ***************************************************************************** // * Function: buildUserSecurityKeys * // * * // * Builds security keys for a user and the roles granted to the user. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * * // * <roleIDs> const std::vector<int32_t> & In * // * is a reference to a vector of roleIDs that have been granted to the * // * user. * // * * // * * // * <userID> const int32_t In * // * is the ID of the user granted the role(s). * // * * // * <secKeySet> std::vector <ComSecurityKey *> & Out * // * passes back a list of SUBJECT_IS_USER security keys for each of the * // * roles granted to the user. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Security keys were built * // * *: Security keys were not built, see diags. * // * * // ***************************************************************************** static PrivStatus buildUserSecurityKeys( const std::vector<int32_t> & roleIDs, const int32_t userID, std::vector<ComSecurityKey *> & secKeySet) { for ( size_t i = 0; i < roleIDs.size(); i++ ) { ComSecurityKey *key = new ComSecurityKey(userID,roleIDs[i], ComSecurityKey::SUBJECT_IS_USER); if (key->isValid()) secKeySet.push_back(key); else return STATUS_ERROR; } return STATUS_GOOD; } //*********************** End of buildUserSecurityKeys ************************* // ***************************************************************************** // * Function: closeColumnList * // * * // * This function closes a list of the form "(column, column, column, ...".* // * The last comma is removed and a closing parenthesis is added. * // * * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <columnList> const std::string & In/Out * // * is the list of columns in string. * // * * // ***************************************************************************** void static closeColumnList(std::string & columnList) { size_t commaPos = columnList.find_last_of(","); // If there is no comma in the string, input not recognized, return unchanged. if (commaPos == std::string::npos) return; // Replace the trailing comma and space with a parenthesis and trailing comma. // Add an additional trailing space for readability. columnList.replace(commaPos,2,"),"); columnList += " "; } //************************** End of closeColumnList **************************** // ***************************************************************************** // * * // * Function: deleteRowList * // * * // * Deletes elements from a vector of PrivMgrMDRows. 
* // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <rowList> std::vector<PrivMgrMDRow *> & In/Out * // * is the list of rows to delete. * // * * // * * // ***************************************************************************** static void deleteRowList(std::vector<PrivMgrMDRow *> & rowList) { while(!rowList.empty()) delete rowList.back(), rowList.pop_back(); } //************************** End of deleteRowList ***************************** // ***************************************************************************** // * Function: findColumnEntry * // * * // * This function searches a vector of ColPrivEntry for a matching * // * column ordinal. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * * // * <colPrivEntries> std::vector<ColPrivEntry> & In * // * is a reference to a vector of ColPrivEntry. * // * * // * <columnOrdinal> const int32_t In * // * is the column ordinal to search for. * // * * // ***************************************************************************** // * * // * Returns: ColPrivEntry * // * * // * NULL: No entry found with that column ordinal * // * *: Entry found with matching column ordinal * // * * // ***************************************************************************** static ColPrivEntry * findColumnEntry( std::vector<ColPrivEntry> & colPrivEntries, const int32_t columnOrdinal) { for (size_t i = 0; i < colPrivEntries.size(); i++) if (colPrivEntries[i].columnOrdinal == columnOrdinal) return & colPrivEntries[i]; return NULL; } //************************** End of findColumnEntry **************************** // ***************************************************************************** // * Function: getColRowsForGrantee * // * * // * Returns the list of column privileges granted for the object that have * // * been granted to the granteeID. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <columnRowList> std::vector<PrivMgrMDRow *> & In * // * is the list of column privileges granted on the object. * // * * // * <granteeID> const int32_t In * // * is the authID granted the privileges. * // * * // * <roleIDs> const std::vector<int32_t> & In * // * is a list of roles granted to the grantee. * // * * // * <rowList> std::vector<PrivMgrMDRow *> & Out * // * passes back a list rows representing the privileges granted. * // * * // * <secKeySet> std::vector <ComSecurityKey *> & Out * // * passes back a list of SUBJECT_IS_USER security keys for each of the * // * roles granted to the grantee. * // * * // ***************************************************************************** // * Returns: PrivStatus * // * * // * STATUS_GOOD: Row returned. 
* // * STATUS_NOTFOUND: No matching rows were found * // ***************************************************************************** static PrivStatus getColRowsForGrantee( const std::vector <PrivMgrMDRow *> &columnRowList, const int32_t granteeID, const std::vector<int32_t> & roleIDs, std::vector<ColumnPrivsMDRow> & rowList, std::vector <ComSecurityKey *>* secKeySet) { std::vector<int32_t> authIDs = roleIDs; authIDs.push_back(granteeID); authIDs.push_back(PUBLIC_AUTH_ID); std::vector<int32_t>::iterator it; std::vector<PrivMgrMDRow *> privRowList; // returns the list of rows for the grantee, roles that the grantee has been // granted, and PUBLIC for (size_t i = 0; i < columnRowList.size(); i++) { ColumnPrivsMDRow &row = static_cast<ColumnPrivsMDRow &> (*columnRowList[i]); it = std::find(authIDs.begin(), authIDs.end(), row.granteeID_); if (it != authIDs.end()) rowList.push_back(row); } if (rowList.empty()) return STATUS_NOTFOUND; if (secKeySet != NULL) return buildUserSecurityKeys(roleIDs,granteeID,*secKeySet); return STATUS_GOOD; } //*********************** End of getColRowsForGrantee ************************** // ***************************************************************************** // * Function: getColRowsForGranteeGrantor * // * * // * Returns the list of column privileges granted for the object that have * // * been granted by the grantorID to the granteeID. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <columnRowList> std::vector <PrivMgrMDRow *> & In * // * is the list of column privileges granted to the object. * // * * // * <granteeID> const int32_t In * // * is the authID granted the privileges. * // * * // * <grantorID> const int32_t In * // * is the authID who granted the privileges. * // * * // * <grantedColPrivs> std::vector<ColPrivEntry> & Out * // * passes back a privileges granted to <granteeID> by <grantorID>. * // * * // ***************************************************************************** static void getColRowsForGranteeGrantor( const std::vector <PrivMgrMDRow *> &columnRowList, const int32_t granteeID, const int32_t grantorID, std::vector<ColPrivEntry> & grantedColPrivs) { for (size_t i = 0; i < columnRowList.size(); ++i) { ColumnPrivsMDRow &row = static_cast<ColumnPrivsMDRow &> (*columnRowList[i]); ColPrivEntry colPrivGrant; if (row.grantorID_ == grantorID && row.granteeID_ == granteeID) { colPrivGrant.columnOrdinal = row.columnOrdinal_; colPrivGrant.privsBitmap = row.privsBitmap_.to_ulong(); colPrivGrant.grantableBitmap = row.grantableBitmap_.to_ulong(); grantedColPrivs.push_back(colPrivGrant); } } } //******************* End of getColRowsForGranteeGrantor *********************** // ***************************************************************************** // * Function: hasAllDMLPrivs * // * * // * This function determines if a privilege bitmap has all the DML * // * privileges for a specified object type. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <objectType> ComObjectType In * // * is the type of the object. * // * * // * <privBitmap> PrivObjectBitmap In * // * is the bitmap representing the privileges. 
* // * * // ***************************************************************************** static bool hasAllDMLPrivs( ComObjectType objectType, PrivObjectBitmap privBitmap) { switch (objectType) { case COM_BASE_TABLE_OBJECT: case COM_VIEW_OBJECT: if (privBitmap.test(DELETE_PRIV) && privBitmap.test(INSERT_PRIV) && privBitmap.test(REFERENCES_PRIV) && privBitmap.test(SELECT_PRIV) && privBitmap.test(UPDATE_PRIV)) return true; break; case COM_LIBRARY_OBJECT: if (privBitmap.test(UPDATE_PRIV) && privBitmap.test(USAGE_PRIV)) return true; break; case COM_STORED_PROCEDURE_OBJECT: case COM_USER_DEFINED_ROUTINE_OBJECT: if (privBitmap.test(EXECUTE_PRIV)) return true; break; case COM_SEQUENCE_GENERATOR_OBJECT: if (privBitmap.test(USAGE_PRIV)) return true; break; default: return false; } return false; } //************************** End of hasAllDMLPrivs ***************************** // ***************************************************************************** // * Function: hasColumnWGO * // * * // * This function determines if the grantor has the authority to grant * // * the specified privileges. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <colPrivsArrayIn> const std::vector<ColPrivSpec> & In * // * is the list of privileges the grantor wants to grant. * // * * // * <roleIDs> std::vector<int_32> & In * // * is the list of role IDs granted to the grantor. * // * * // * <privStatus> PrivStatus & In * // * passes back the PrivStatus. * // * * // * * // ***************************************************************************** // * * // * Returns: bool * // * * // * true: Grantor has WGO. * // * false: Grantor does NOT have WGO. See privStatus. * // * * // ***************************************************************************** bool PrivMgrPrivileges::hasColumnWGO( const std::vector<ColPrivSpec> & colPrivsArrayIn, const std::vector<int32_t> &roleIDs, PrivStatus & privStatus) { std::vector<ColPrivSpec> &colPrivsArray = const_cast<std::vector<ColPrivSpec> &>(colPrivsArrayIn); privStatus = STATUS_GOOD; // Grantor may have column WGO from two sources, object-level grants on the // object and column-level grants. First check the object-level grants. std::vector<PrivMgrMDRow *> objRowList; // Get object privileges that the grantor has been granted - that is, the // grantor becomes the grantee. privStatus = getRowsForGrantee(objectUID_,grantorID_,true,roleIDs,objRowList,NULL); if (privStatus == STATUS_ERROR) return privStatus; // For each privilege to grant, see if the grantor has been granted that // privilege WITH GRANT OPTION (WGO). If so, note it in the colPrivsArray entry. // If the grantor does not have WGO, note that we have to check column // privileges for at least one grant. bool checkColumnPrivs = false; for (size_t i = 0; i < colPrivsArray.size(); i++) { ColPrivSpec &colPrivEntry = colPrivsArray[i]; colPrivEntry.grantorHasWGO = false; for (size_t j = 0; j < objRowList.size(); j++) { ObjectPrivsMDRow &objectRow = static_cast<ObjectPrivsMDRow &> (*objRowList[j]); if (objectRow.grantableBitmap_.test(colPrivEntry.privType)) { colPrivEntry.grantorHasWGO = true; break; } } if (!colPrivEntry.grantorHasWGO) checkColumnPrivs = true; } // If object-level privileges are sufficient to grant the column-level // privileges, no need to read COLUMN_PRIVILEGES table. 
if (!checkColumnPrivs) return true; // The grantor did not have WGO at the object level for at least one // of the privileges to be granted; see if they have the column privilege WGO. // Fetch any relevant WGO rows from COLUMN_PRIVILEGES. std::vector<PrivMgrMDRow *> colRowList; // Get object privileges that the grantor has been granted - that is, the // grantor becomes the grantee. privStatus = getRowsForGrantee(objectUID_,grantorID_,false,roleIDs,colRowList,NULL); if (privStatus == STATUS_ERROR) return privStatus; for (size_t i = 0; i < colPrivsArray.size(); i++) { // If the grantor already has the authority to grant this privilege // from another source, move to the next privilege to be granted. if (colPrivsArray[i].grantorHasWGO) continue; ColPrivSpec &colPrivEntry = colPrivsArray[i]; // See if the grantor has been granted WGO at column-level for priv. for (size_t j = 0; i < colRowList.size(); j++) { ColumnPrivsMDRow &columnRow = static_cast<ColumnPrivsMDRow &> (*colRowList[i]); if (columnRow.grantableBitmap_.test(colPrivEntry.privType)) { colPrivEntry.grantorHasWGO = true; break; } } // If the grantor does not have an object-level or column-level WGO // for one of the privs to grant, return an error. if (!colPrivEntry.grantorHasWGO) { privStatus = STATUS_NOTFOUND; return false; } } return true; } //*************************** End of hasColumnWGO ****************************** // ***************************************************************************** // * * // * Function: hasGrantedColumnPriv * // * * // * This function determines if a grantor has granted the specified * // * set of privileges on the specified object to the specified grantee. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <colRowList> std::vector<PrivMgrMDRow *> & In * // * is the list of all column privileges granted to the object. * // * * // * <grantorID> const int32_t In * // * is the authorization ID of the grantor. * // * * // * <granteeID> const int32_t In * // * is the authorization ID of the grantee. * // * * // * <colPrivsArray> const std::vector<ColPrivSpec> & In * // * is an array of column privilege specifications, with one entry per * // * privilege and column. * // * * // * <privStatus> PrivStatus Out * // * passes back the PrivStatus for last operation. * // * * // * <privilege> std::string & Out * // * passes back the first privilege not granted. * // * * // * <grantedColPrivs> std::vector<ColPrivEntry> & Out * // * passes back an array of column privilege entries, with one entry per * // * column--privileges are combined into one bitmap. * // * * // ***************************************************************************** // * * // * Returns: bool * // * * // * true: All specified privileges have been granted. * // * false: One or more privileges have not been granted * // * * // ***************************************************************************** static bool hasGrantedColumnPriv( const std::vector<PrivMgrMDRow *> &colRowList, int32_t grantorID, int32_t granteeID, const std::vector<ColPrivSpec> & colPrivsArray, PrivStatus & privStatus, std::string & privilege, std::vector<ColPrivEntry> & grantedColPrivs) { privStatus = STATUS_GOOD; // For each privilege to revoke, see if the grantor has granted that privilege // to the grantee for the specified column. If not, return an error. 
for (size_t i = 0; i < colPrivsArray.size(); i++) { const ColPrivSpec &colPrivEntry = colPrivsArray[i]; bool grantFound = false; for (size_t j = 0; j < colRowList.size(); j++) { ColumnPrivsMDRow &columnRow = static_cast<ColumnPrivsMDRow &> (*colRowList[j]); // Only look at rows with the requested grantor and grantee if (columnRow.grantorID_ == grantorID && columnRow.granteeID_ == granteeID) { if (columnRow.columnOrdinal_ == colPrivEntry.columnOrdinal && columnRow.privsBitmap_.test(colPrivEntry.privType)) { grantFound = true; break; } } } if (!grantFound) { privilege = PrivMgrUserPrivs::convertPrivTypeToLiteral((PrivType)colPrivEntry.privType); privStatus = STATUS_NOTFOUND; return false; } } // Build array of granted privileges. One entry per column granted a privilege // from grantor to grantee. for (size_t j = 0; j < colRowList.size(); j++) { ColumnPrivsMDRow &columnRow = static_cast<ColumnPrivsMDRow &> (*colRowList[j]); if (columnRow.grantorID_ == grantorID && columnRow.granteeID_ == granteeID) { ColPrivEntry grantedColPriv; grantedColPriv.columnOrdinal = columnRow.columnOrdinal_; grantedColPriv.grantableBitmap = columnRow.grantableBitmap_; grantedColPriv.privsBitmap = columnRow.privsBitmap_; grantedColPrivs.push_back(grantedColPriv); } } return true; } //*********************** End of hasGrantedColumnPriv ************************** // ***************************************************************************** // * * // * Function: isDelimited * // * * // * This function checks the passed in string for characters other than * // * alphanumeric and underscore characters. If so, the name is delimited * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <strToScan> const std::string & In * // * is the string to search for delimited characters * // ***************************************************************************** // * * // * Returns: bool * // * * // * true: the passed in string contains delimited characters * // * false: the passed in string contains no delimited characters * // * * // ***************************************************************************** static bool isDelimited( const std::string &strToScan) { char firstChar = strToScan[0]; if (isdigit(firstChar) || strToScan[0] == '_' ) return true; string validChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; size_t found = strToScan.find_first_not_of(validChars); if (found == string::npos) return false; return true; } //*********************** End of isDelimited *********************************** // ***************************************************************************** // ObjectPrivsMDRow methods // ***************************************************************************** void ObjectPrivsMDRow::describeRow (std::string &rowDetails) { rowDetails = "OBJECT_PRIVILEGES row: type is "; char objectTypeLit[3] = {0}; strncpy(objectTypeLit,PrivMgr::ObjectEnumToLit(objectType_),2); rowDetails += objectTypeLit; rowDetails += ", UID is "; rowDetails += to_string((long long int) objectUID_); rowDetails += ", grantor is "; rowDetails += to_string((long long int)grantorID_); rowDetails += ", grantee is "; rowDetails += to_string((long long int) granteeID_); } // ***************************************************************************** // ObjectPrivsMDTable methods // ***************************************************************************** // ***************************************************************************** // * method: 
ObjectPrivsMDTable::selectWhereUnique // * // * Select the row from the OBJECT_PRIVILEGES table based on the specified // * WHERE clause - where clause should only return a single row // * // * Parameters: // * // * <whereClause> is the WHERE clause specifying a unique row. // * <rowOut> passes back a set of OBJECT_PRIVILEGES rows // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row returned. // * *: Select failed. A CLI error is put into the diags area. // ***************************************************************************** PrivStatus ObjectPrivsMDTable::selectWhereUnique( const std::string & whereClause, PrivMgrMDRow & rowOut) { ObjectPrivsMDRow & row = static_cast<ObjectPrivsMDRow &>(rowOut); PrivStatus retcode = STATUS_GOOD; // Should space be allocated and deleted from the heap for the rowList? // -- how many rows will be returned? std::vector<PrivMgrMDRow* > rowList; std::string orderByClause; retcode = selectWhere(whereClause, orderByClause, rowList); if (retcode == STATUS_GOOD) { // The I/O should be performed on a primary key so only one row returned // If not, return an internal error if (rowList.size() != 1) { while(!rowList.empty()) delete rowList.back(), rowList.pop_back(); PRIVMGR_INTERNAL_ERROR("Select unique for object_privileges table returned more than 1 row"); return STATUS_ERROR; } row = static_cast<ObjectPrivsMDRow &>(*rowList[0]); } while(!rowList.empty()) delete rowList.back(), rowList.pop_back(); return retcode; } // ***************************************************************************** // * method: ObjectPrivsMDTable::selectWhere // * // * Selects rows from the OBJECT_PRIVILEGES table based on the specified // * WHERE clause. // * // * Parameters: // * // * <whereClause> is the WHERE clause // * <orderByClause> is the ORDER BY clause defining returned row order. // * <rowOut> passes back a set of OBJECT_PRIVILEGES rows // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row returned. // * *: Select failed. A CLI error is put into the diags area. 
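// *
// * Example call (clause values are illustrative only):
// *   selectWhere("WHERE object_uid = 12345",
// *               " ORDER BY grantor_id, grantee_id",
// *               rowList);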
// ***************************************************************************** PrivStatus ObjectPrivsMDTable::selectWhere( const std::string & whereClause, const std::string & orderByClause, std::vector<PrivMgrMDRow *> &rowList) { std::string selectStmt ("SELECT OBJECT_UID, OBJECT_NAME, OBJECT_TYPE, "); selectStmt += ("GRANTEE_ID, GRANTEE_NAME, GRANTEE_TYPE, "); selectStmt += ("GRANTOR_ID, GRANTOR_NAME, GRANTOR_TYPE, "); selectStmt += ("PRIVILEGES_BITMAP, GRANTABLE_BITMAP FROM "); selectStmt += tableName_; selectStmt += " "; selectStmt += whereClause; selectStmt += orderByClause; // set pointer in diags area int32_t diagsMark = pDiags_->mark(); ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); Queue * tableQueue = NULL; int32_t cliRC = cliInterface.fetchAllRows(tableQueue, (char *)selectStmt.c_str(), 0, false, false, true); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } if (cliRC == 100) // did not find the row { pDiags_->rewind(diagsMark); return STATUS_NOTFOUND; } tableQueue->position(); for (int idx = 0; idx < tableQueue->numEntries(); idx++) { OutputInfo * pCliRow = (OutputInfo*)tableQueue->getNext(); ObjectPrivsMDRow *pRow = new ObjectPrivsMDRow(); setRow(pCliRow, *pRow); rowList.push_back(pRow); } return STATUS_GOOD; } // ***************************************************************************** // * method: ObjectPrivsMDTable::setRow // * // * Create an ObjectPrivsMDRow object from the information returned from the // * cli. // * // * Parameters: // * // * <OutputInfo> row destails from the cli // * <rowOut> passes back the ObjectPrivsMDRow row // * // * no errors should be generated // ***************************************************************************** // Row read successfully. Extract the columns. 
void ObjectPrivsMDTable::setRow (OutputInfo *pCliRow, ObjectPrivsMDRow &row) { char * ptr = NULL; Int32 len = 0; char value[500]; // column 1: object uid pCliRow->get(0,ptr,len); row.objectUID_ = *(reinterpret_cast<int64_t*>(ptr)); // column 2: object name pCliRow->get(1,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.objectName_ = value; // column 3: object type pCliRow->get(2,ptr,len); assert (len < 3); strncpy(value, ptr, len); value[len] = 0; row.objectType_ = PrivMgr::ObjectLitToEnum(value); // column 4: grantee uid pCliRow->get(3,ptr,len); row.granteeID_ = *(reinterpret_cast<int32_t*>(ptr)); // column 5: grantee name pCliRow->get(4,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.granteeName_ = value; // column 6: grantee type pCliRow->get(5,ptr,len); assert (len < 3); strncpy(value, ptr, len); value[len] = 0; row.granteeType_ = value; // column 7: grantor uid pCliRow->get(6,ptr,len); row.grantorID_ = *(reinterpret_cast<int32_t*>(ptr)); //column 8: grantor name pCliRow->get(7,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.grantorName_ = value; //column 9: grantor type pCliRow->get(8,ptr,len); assert (len < 3); strncpy(value, ptr, len); value[len] = 0; row.grantorType_ = value; // column 10: privileges bitmap pCliRow->get(9,ptr,len); int64_t bitmapInt = *(reinterpret_cast<int64_t*>(ptr)); row.privsBitmap_ = bitmapInt; // column 11: grantable bitmap pCliRow->get(10,ptr,len); bitmapInt = *(reinterpret_cast<int64_t*>(ptr)); row.grantableBitmap_ = bitmapInt; // set current_ PrivMgrCoreDesc tempDesc (row.privsBitmap_, row.grantableBitmap_); row.current_= tempDesc; row.visited_.setAllPrivAndWgo(false); } // ***************************************************************************** // * method: ObjectPrivsMDTable::insert // * // * Inserts a row into the OBJECT_PRIVILEGES table. // * // * Parameters: // * // * <rowIn> is a ObjectPrivsMDRow to be inserted. // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row inserted. // * *: Insert failed. A CLI error is put into the diags area. 
// ***************************************************************************** PrivStatus ObjectPrivsMDTable::insert(const PrivMgrMDRow &rowIn) { char insertStmt[2000]; const ObjectPrivsMDRow &row = static_cast<const ObjectPrivsMDRow &>(rowIn); int64_t privilegesBitmapLong = row.privsBitmap_.to_ulong(); int64_t grantableBitmapLong = row.grantableBitmap_.to_ulong(); char objectTypeLit[3] = {0}; strncpy(objectTypeLit,PrivMgr::ObjectEnumToLit(row.objectType_),2); sprintf(insertStmt, "insert into %s values (%ld, '%s', '%s', %d, '%s', '%s', %d, '%s', '%s', %ld, %ld)", tableName_.c_str(), row.objectUID_, row.objectName_.c_str(), objectTypeLit, row.granteeID_, row.granteeName_.c_str(), row.granteeType_.c_str(), row.grantorID_, row.grantorName_.c_str(), row.grantorType_.c_str(), privilegesBitmapLong, grantableBitmapLong); ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); int32_t cliRC = cliInterface.executeImmediate(insertStmt); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(pDiags_); return STATUS_ERROR; } // For some reason, insert sometimes returns error even though // the row is inserted, so unless an errors, return STATUS_GOOD return STATUS_GOOD; } // ***************************************************************************** // * method: ObjectPrivsMDTable::deleteRow // * // * Deletes a row from the OBJECT_PRIVILEGES table based on the primary key // * contents of the row. // * // * Parameters: // * // * <row> defines what row should be deleted // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row deleted. // * *: Insert failed. A CLI error is put into the diags area. // ***************************************************************************** PrivStatus ObjectPrivsMDTable::deleteRow(const ObjectPrivsMDRow & row) { char whereClause[1000]; sprintf(whereClause," WHERE object_uid = %ld AND grantor_id = %d AND grantee_id = %d ", row.objectUID_,row.grantorID_,row.granteeID_); return deleteWhere(whereClause); } // ***************************************************************************** // * method: ObjectPrivsMDTable::deleteWhere // * // * Deletes a row from the OBJECT_PRIVILEGES table based on the where clause // * // * Parameters: // * // * <whereClause> defines what rows should be deleted // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row(s) deleted. // * *: Insert failed. A CLI error is put into the diags area. // ***************************************************************************** PrivStatus ObjectPrivsMDTable::deleteWhere(const std::string & whereClause) { std::string deleteStmt ("DELETE FROM "); deleteStmt += tableName_; deleteStmt += " "; deleteStmt += whereClause; // set pointer in diags area int32_t diagsMark = pDiags_->mark(); ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); int32_t cliRC = cliInterface.executeImmediate(deleteStmt.c_str()); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } if (cliRC == 100) // did not find any rows { pDiags_->rewind(diagsMark); return STATUS_NOTFOUND; } if (cliRC > 0) return STATUS_WARNING; return STATUS_GOOD; } // ***************************************************************************** // * method: ObjectPrivsMDTable::updateRow // * // * Updates grantor and bitmaps for a row in the OBJECT_PRIVILEGES table // * based on the contents of the row. 
// * // * Parameters: // * // * <row> defines what row should be updated // * // * Returns: PrivStatus // * // * STATUS_GOOD: Row(s) deleted. // * *: Insert failed. A CLI error is put into the diags area. // ***************************************************************************** PrivStatus ObjectPrivsMDTable::updateRow(const ObjectPrivsMDRow & row) { char setClause[1000]; int64_t privilegesBitmapLong = row.privsBitmap_.to_ulong(); int64_t grantableBitmapLong = row.grantableBitmap_.to_ulong(); sprintf(setClause," SET grantor_id = %d, grantor_name = '%s', " " privileges_bitmap = %ld, grantable_bitmap = %ld ", row.grantorID_,row.grantorName_.c_str(),privilegesBitmapLong,grantableBitmapLong); char whereClause[1000]; sprintf(whereClause," WHERE object_uid = %ld AND grantor_id = %d AND grantee_id = %d ", row.objectUID_,row.grantorID_,row.granteeID_); return updateWhere(setClause,whereClause); } // ---------------------------------------------------------------------------- // method: updateWhere // // This method updates one or more rows from the OBJECT_PRIVILEGES table // The number of rows affected depend on the passed in set clause // // Input: setClause // whereClause // Output: status of the operation // // A cli error is put into the diags area if there is an error // ---------------------------------------------------------------------------- PrivStatus ObjectPrivsMDTable::updateWhere(const std::string & setClause, const std::string & whereClause) { std::string updateStmt ("UPDATE "); updateStmt += tableName_; updateStmt += " "; updateStmt += setClause; updateStmt += " "; updateStmt += whereClause; // set pointer in diags area int32_t diagsMark = pDiags_->mark(); ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); int32_t cliRC = cliInterface.executeImmediate(updateStmt.c_str()); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } if (cliRC == 100) // did not find any rows { pDiags_->rewind(diagsMark); return STATUS_NOTFOUND; } if (cliRC > 0) return STATUS_WARNING; return STATUS_GOOD; } // ---------------------------------------------------------------------------- // method::insertSelect // // This method inserts owner rows into the OBJECT_PRIVILEGES table // // Input: objectsLocation - name of objects table // authsLocation - name of auths table // // Output: PrivStatus // // the following is a sample insert select statement that gets processed: // // insert into OBJECT_PRIVILEGES // select distinct // object_uid, // <catalogName> "<schema_name>"."<object_name>", // object_type, // object_owner, -- granteeID // (select auth_db_name from AUTHS where auth_id = object_owner), --granteeName // USER_GRANTEE_LIT, -- "U" // SYSTEM_AUTH_ID, -- system grantor ID (-2) // SYSTEM_AUTH_NAME, -- grantorName (_SYSTEM) // SYSTEM_GRANTOR_LIST, -- "S" // case // when object_type = 'BT' then 47 // when object_type = 'VI' then 1 // when object_type = 'LB' then 24 // when object_type = 'UR' then 64 // when object_type = 'SG' then 16 // else 0 // end as privilegesBitmap, // case // when object_type = 'BT' then 47 // when object_type = 'VI' then 0 // when object_type = 'LB' then 24 // when object_type = 'UR' then 0 // when object_type = 'SG' then 16 // else 0 // end as grantableBitmap // from OBJECTS // where object_type in ('VI','BT','LB','UR','SG') // // The ComDiags area is set up with unexpected errors // ---------------------------------------------------------------------------- PrivStatus 
ObjectPrivsMDTable::insertSelect( const std::string &objectsLocation, const std::string &authsLocation) { // Before inserting rows, make sure that the OBJECT_PRIVILEGES table is empty char buf[2000]; sprintf(buf, "select count(*) from %s", tableName_.c_str()); Int64 rowsSelected = 0; Lng32 theLen = 0; ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); int32_t cliRC = cliInterface.executeImmediate(buf, (char*)&rowsSelected, &theLen, NULL); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } if (rowsSelected != 0) { std::string message ("Found "); message += to_string((long long int)rowsSelected); message += " rows in OBJECT_PRIVILEGES table, expecting 0 rows"; PRIVMGR_INTERNAL_ERROR(message.c_str()); return STATUS_ERROR; } // Create bitmaps for all supported object types; PrivMgrDesc privDesc; privDesc.setAllTableGrantPrivileges(true); int64_t tableBits = privDesc.getTablePrivs().getPrivBitmap().to_ulong(); privDesc.setAllLibraryGrantPrivileges(true); int64_t libraryBits = privDesc.getTablePrivs().getPrivBitmap().to_ulong(); privDesc.setAllUdrGrantPrivileges(true); int64_t udrBits = privDesc.getTablePrivs().getPrivBitmap().to_ulong(); privDesc.setAllSequenceGrantPrivileges(true); int64_t sequenceBits = privDesc.getTablePrivs().getPrivBitmap().to_ulong(); // for views, privilegesBitmap is set to 1 (SELECT), wgo to 0 (no) std::string systemGrantor("_SYSTEM"); // Generate case stmt for grantable bitmap sprintf (buf, "case when object_type = 'BT' then %ld " " when object_type = 'VI' then 1 " " when object_type = 'LB' then %ld " " when object_type = 'UR' then %ld " " when object_type = 'SG' then %ld " " else 0 end", tableBits, libraryBits, udrBits, sequenceBits); std::string privilegesClause(buf); sprintf (buf, "case when object_type = 'BT' then %ld " " when object_type = 'VI' then 0 " " when object_type = 'LB' then %ld " " when object_type = 'UR' then %ld " " when object_type = 'SG' then %ld " " else 0 end", tableBits, libraryBits, udrBits, sequenceBits); std::string grantableClause(buf); sprintf(buf, "insert into %s select distinct object_uid, " "trim(catalog_name) || '.\"' || trim(schema_name) || '\".\"' || trim(object_name) || '\"', " "object_type, object_owner, " "(select auth_db_name from %s where auth_id = o.object_owner) as auth_db_name, " "'%s', %d, '%s', '%s', %s, %s from %s o " "where o.object_type in ('VI','BT','LB','UR','SG')", tableName_.c_str(), authsLocation.c_str(), USER_GRANTEE_LIT, SYSTEM_AUTH_ID, SYSTEM_AUTH_NAME, SYSTEM_GRANTOR_LIT, privilegesClause.c_str(), grantableClause.c_str(), objectsLocation.c_str()); // set pointer in diags area int32_t diagsMark = pDiags_->mark(); Int64 rowsInserted = 0; cliRC = cliInterface.executeImmediate(buf, NULL, NULL, FALSE, &rowsInserted); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } // Bug: for some reasons, insert returns NOTFOUND even though the // operations succeeded. if (cliRC == 100) { pDiags_->rewind(diagsMark); cliRC = 0; } // Make sure rows were inserted correctly. 
// Get the expected number of rows sprintf(buf, "select count(*) from %s o where o.object_type in ('VI','BT','LB','UR', 'SG')" " and object_owner > 0", objectsLocation.c_str()); Lng32 len = 0; cliRC = cliInterface.executeImmediate(buf, (char*)&rowsSelected, &len, NULL); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } // Check to see if the number of rows selected match the rows inserted if (rowsInserted != rowsSelected) { std::string message ("Expected to insert "); message += to_string((long long int)rowsSelected); message += " rows into OBJECT_PRIVILEGES table, instead "; message += to_string((long long int)rowsInserted); message += " were found."; PRIVMGR_INTERNAL_ERROR(message.c_str()); return STATUS_ERROR; } return STATUS_GOOD; } // ---------------------------------------------------------------------------- // method::insertSelect // // This method inserts a grant of SELECT on the AUTHS table to PUBLIC // into the OBJECT_PRIVILEGES table // // Input: objectsLocation - name of objects table // authsLocation - name of auths table // // Output: PrivStatus // // The ComDiags area is set up with unexpected errors // ---------------------------------------------------------------------------- PrivStatus ObjectPrivsMDTable::insertSelectOnAuthsToPublic( const std::string &objectsLocation, const std::string &authsLocation) { char buf[2000]; sprintf(buf, "insert into %s select o.object_uid,'%s','BT',-1,'PUBLIC','U'," "%d,'DB__ROOT','U',1,0 FROM %s O WHERE O.OBJECT_NAME = 'AUTHS'", tableName_.c_str(),authsLocation.c_str(), MIN_USERID, objectsLocation.c_str()); // set pointer in diags area int32_t diagsMark = pDiags_->mark(); Int64 rowsInserted = 0; ExeCliInterface cliInterface(STMTHEAP, NULL, NULL, CmpCommon::context()->sqlSession()->getParentQid()); int32_t cliRC = cliInterface.executeImmediate(buf, NULL, NULL, FALSE, &rowsInserted); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } // Bug: for some reasons, insert returns NOTFOUND even though the // operations succeeded. if (cliRC == 100) { pDiags_->rewind(diagsMark); cliRC = 0; } return STATUS_GOOD; } // ***************************************************************************** // ColumnPrivsMDRow methods // ***************************************************************************** void ColumnPrivsMDRow::describeRow (std::string &rowDetails) { rowDetails = "COLUMN_PRIVILEGES row: UID is "; rowDetails += to_string((long long int) objectUID_); rowDetails += ", column number is "; rowDetails += to_string((long long int) columnOrdinal_); rowDetails += ", grantor is "; rowDetails += to_string((long long int)grantorID_); rowDetails += ", grantee is "; rowDetails += to_string((long long int) granteeID_); } // ***************************************************************************** // ColumnPrivsMDTable methods // ***************************************************************************** // ***************************************************************************** // * * // * Function: ColumnPrivsMDTable::insert * // * * // * Inserts a row into the COLUMN_PRIVILEGES table. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <rowIn> const PrivMgrMDRow & In * // * is a ColumnPrivsMDRow to be inserted. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Row inserted. 
* // * *: Insert failed. A CLI error is put into the diags area. * // * * // ***************************************************************************** PrivStatus ColumnPrivsMDTable::insert(const PrivMgrMDRow &rowIn) { char insertStmt[2000]; const ColumnPrivsMDRow &row = static_cast<const ColumnPrivsMDRow &>(rowIn); int64_t privilegesBitmapLong = row.privsBitmap_.to_ulong(); int64_t grantableBitmapLong = row.grantableBitmap_.to_ulong(); sprintf(insertStmt, "INSERT INTO %s VALUES (%ld, '%s', %d, '%s', %d, '%s', %d, %ld, %ld)", tableName_.c_str(), row.objectUID_, row.objectName_.c_str(), row.granteeID_, row.granteeName_.c_str(), row.grantorID_, row.grantorName_.c_str(), row.columnOrdinal_, privilegesBitmapLong, grantableBitmapLong); return CLIImmediate(insertStmt); } //********************* End of ColumnPrivsMDTable::insert ********************** // ***************************************************************************** // * * // * Function: ColumnPrivsMDTable::selectWhere * // * * // * Selects rows from the COLUMN_PRIVILEGES table based on the specified * // * WHERE clause. Output is sorted by grantor, grantee, column in that order.* // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <whereClause> const std::string & In * // * is the WHERE clause specifying a unique row. * // * * // * <orderByClause> is the ORDER BY clause defining returned row order. // * <rowList> std::vector<PrivMgrMDRow *> & Out * // * passes back a set of ColumnPrivsMDRow rows. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Rows returned. * // * *: Select failed. A CLI error is put into the diags area. * // * * // ***************************************************************************** PrivStatus ColumnPrivsMDTable::selectWhere( const std::string & whereClause, const std::string & orderByClause, std::vector<PrivMgrMDRow *> &rowList) { std::string selectStmt("SELECT object_uid,object_name," "grantee_id,grantee_name," "grantor_id,grantor_name,column_number," "privileges_bitmap,grantable_bitmap FROM "); selectStmt += tableName_ + " "; selectStmt += whereClause + orderByClause; // set pointer in diags area int32_t diagsMark = pDiags_->mark(); ExeCliInterface cliInterface(STMTHEAP); Queue * tableQueue = NULL; int32_t cliRC = cliInterface.fetchAllRows(tableQueue, (char *)selectStmt.c_str(), 0, false, false, true); if (cliRC < 0) { cliInterface.retrieveSQLDiagnostics(CmpCommon::diags()); return STATUS_ERROR; } if (cliRC == 100) // did not find the row { pDiags_->rewind(diagsMark); return STATUS_NOTFOUND; } tableQueue->position(); for (int idx = 0; idx < tableQueue->numEntries(); idx++) { OutputInfo * pCliRow = (OutputInfo*)tableQueue->getNext(); ColumnPrivsMDRow *pRow = new ColumnPrivsMDRow(); setRow(pCliRow,*pRow); rowList.push_back(pRow); } return STATUS_GOOD; } //****************** End of ColumnPrivsMDTable::selectWhere ******************** // ***************************************************************************** // * * // * Function: ColumnPrivsMDTable::selectWhereUnique * // * * // * Selects a row from the COLUMN_PRIVILEGES table based on the specified * // * WHERE clause. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <whereClause> const std::string & In * // * is the WHERE clause specifying a unique row. 
* // * * // * <row> PrivMgrMDRow & Out * // * passes back a ColumnPrivsMDRow row. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Row returned. * // * *: Select failed. A CLI error is put into the diags area. * // * * // ***************************************************************************** PrivStatus ColumnPrivsMDTable::selectWhereUnique( const std::string & whereClause, PrivMgrMDRow & row) { //TODO: Currently unused. Added due to virtual declaration. Will be fleshed // out when run-time column privilege checking added. return STATUS_GOOD; } //************** End of ColumnPrivsMDTable::selectWhereUnique ****************** // ***************************************************************************** // * * // * Function: MyTable::setRow * // * * // * Create a ColumnPrivsMDRow object from the information returned from the // * CLI. // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <pCliRow> OutputInfo & In * // * is a pointer to the CLI interface to the row data that was read. * // * * // * <row> PrivMgrMDRow & Out * // * passes back a ColumnPrivsMDRow. * // * * // ***************************************************************************** void ColumnPrivsMDTable::setRow( OutputInfo *pCliRow, ColumnPrivsMDRow &row) { char * ptr = NULL; Int32 len = 0; char value[500]; // column 0: object uid pCliRow->get(0,ptr,len); row.objectUID_ = *(reinterpret_cast<int64_t*>(ptr)); // column 1: object name pCliRow->get(1,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.objectName_ = value; // column 2: grantee id pCliRow->get(2,ptr,len); row.granteeID_ = *(reinterpret_cast<int32_t*>(ptr)); // column 3: grantee name pCliRow->get(3,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.granteeName_ = value; // column 4: grantor id pCliRow->get(4,ptr,len); row.grantorID_ = *(reinterpret_cast<int32_t*>(ptr)); // column 5: grantor name pCliRow->get(5,ptr,len); assert (len < 257); strncpy(value, ptr, len); value[len] = 0; row.grantorName_ = value; // column 6: column_number pCliRow->get(6,ptr,len); row.columnOrdinal_ = *(reinterpret_cast<int32_t*>(ptr)); // column 7: privileges bitmap pCliRow->get(7,ptr,len); int64_t bitmapInt = *(reinterpret_cast<int64_t*>(ptr)); row.privsBitmap_ = bitmapInt; // column 8: grantable bitmap pCliRow->get(8,ptr,len); bitmapInt = *(reinterpret_cast<int64_t*>(ptr)); row.grantableBitmap_ = bitmapInt; } //******************* End of ColumnPrivsMDTable::setRow ************************ // ***************************************************************************** // * * // * Function: ColumnPrivsMDTable::updateRow * // * * // * Updates the bitmaps for a row in the COLUMN_PRIVILEGES table based on * // * the contents of the row. * // * * // ***************************************************************************** // * * // * Parameters: * // * * // * <row> PrivMgrMDRow & In * // * is the row to be updated. * // * * // * <whereBase> const std::string & In * // * is the WHERE clause specifying the primary keys except for the * // * column number, which is added within this function. * // * * // ***************************************************************************** // * * // * Returns: PrivStatus * // * * // * STATUS_GOOD: Row returned. * // * *: Select failed. A CLI error is put into the diags area. 
* // * * // ***************************************************************************** PrivStatus ColumnPrivsMDTable::updateColumnRow( const ColumnPrivsMDRow & row, const std::string whereBase) { char setClause[1000]; int64_t privilegesBitmapLong = row.privsBitmap_.to_ulong(); int64_t grantableBitmapLong = row.grantableBitmap_.to_ulong(); sprintf(setClause," SET privileges_bitmap = %ld, grantable_bitmap = %ld ", privilegesBitmapLong,grantableBitmapLong); char whereClause[1000]; sprintf(whereClause," %s %d",whereBase.c_str(),row.columnOrdinal_); return updateWhere(setClause,whereClause); } //*************** End of ColumnPrivsMDTable::updateColumnRow *******************
1
7,777
I wonder what would happen if the granteeName becomes DB__ROOT but the granteeID is not the DB__ROOT ID (33333) but something else.
apache-trafodion
cpp
@@ -299,6 +299,10 @@ func (install *PleaseGoInstall) compilePackage(target string, pkg *build.Package } } + if f := os.Getenv("CFLAGS"); f != "" { + cFlags = append(cFlags, f) + } + cFiles := pkg.CFiles cgoGoFiles, cgoCFiles, err := install.tc.CGO(pkg.Dir, workDir, cFlags, pkg.CgoFiles)
1
package install import ( "bufio" "fmt" "go/build" "os" "path/filepath" "strings" "github.com/thought-machine/please/tools/please_go/install/exec" "github.com/thought-machine/please/tools/please_go/install/toolchain" ) // PleaseGoInstall implements functionality similar to `go install` however it works with import configs to avoid a // dependence on the GO_PATH, go.mod or other go build concepts. type PleaseGoInstall struct { buildContext build.Context srcRoot string moduleName string importConfig string ldFlags string outDir string trimPath string tc *toolchain.Toolchain compiledPackages map[string]string // A set of flags we from pkg-config or #cgo comments collectedLdFlags map[string]struct{} } // New creates a new PleaseGoInstall func New(buildTags []string, srcRoot, moduleName, importConfig, ldFlags, goTool, ccTool, pkgConfTool, out, trimPath string) *PleaseGoInstall { ctx := build.Default ctx.BuildTags = append(ctx.BuildTags, buildTags...) return &PleaseGoInstall{ buildContext: ctx, srcRoot: srcRoot, moduleName: moduleName, importConfig: importConfig, ldFlags: ldFlags, outDir: out, trimPath: trimPath, collectedLdFlags: map[string]struct{}{}, tc: &toolchain.Toolchain{ CcTool: ccTool, GoTool: goTool, PkgConfigTool: pkgConfTool, Exec: &exec.Executor{Stdout: os.Stdout, Stderr: os.Stderr}, }, } } // Install will compile the provided packages. Packages can be wildcards i.e. `foo/...` which compiles all packages // under the directory tree of `{module}/foo` func (install *PleaseGoInstall) Install(packages []string) error { if err := install.initBuildEnv(); err != nil { return err } if err := install.parseImportConfig(); err != nil { return err } for _, target := range packages { if !strings.HasPrefix(target, install.moduleName) { target = filepath.Join(install.moduleName, target) } if strings.HasSuffix(target, "/...") { importRoot := strings.TrimSuffix(target, "/...") err := install.compileAll(importRoot) if err != nil { return err } } else { if err := install.compile([]string{}, target); err != nil { return fmt.Errorf("failed to compile %v: %w", target, err) } pkg, err := install.importDir(target) if err != nil { panic(fmt.Sprintf("import dir failed after successful compilation: %v", err)) } if pkg.IsCommand() { if err := install.linkPackage(target); err != nil { return fmt.Errorf("failed to link %v: %w", target, err) } } } } if err := install.writeLDFlags(); err != nil { return fmt.Errorf("failed to write ld flags: %w", err) } return nil } func (install *PleaseGoInstall) writeLDFlags() error { ldFlags := make([]string, 0, len(install.collectedLdFlags)) for flag := range install.collectedLdFlags { ldFlags = append(ldFlags, flag) } if len(ldFlags) > 0 { if err := install.tc.Exec.Run("echo -n \"%s\" >> %s", strings.Join(ldFlags, " "), install.ldFlags); err != nil { return err } } return nil } func (install *PleaseGoInstall) linkPackage(target string) error { out := install.compiledPackages[target] filename := strings.TrimSuffix(filepath.Base(out), ".a") binName := filepath.Join(install.outDir, "bin", filename) return install.tc.Link(out, binName, install.importConfig, install.ldFlags) } // compileAll walks the provided directory looking for go packages to compile. Unlike compile(), this will skip any // directories that contain no .go files for the current architecture. 
func (install *PleaseGoInstall) compileAll(dir string) error { pkgRoot := install.pkgDir(dir) return filepath.Walk(pkgRoot, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if !info.IsDir() { relativePackage := filepath.Dir(strings.TrimPrefix(path, pkgRoot)) if err := install.compile([]string{}, filepath.Join(dir, relativePackage)); err != nil { switch err.(type) { case *build.NoGoError: // We might walk into a dir that has no .go files for the current arch. This shouldn't // be an error so we just eat this return nil default: return err } } } else if info.Name() == "testdata" { return filepath.SkipDir // Dirs named testdata are deemed not to contain buildable Go code. } return nil }) } func (install *PleaseGoInstall) initBuildEnv() error { if err := install.tc.Exec.Run("mkdir -p %s\n", filepath.Join(install.outDir, "bin")); err != nil { return err } return install.tc.Exec.Run("touch %s", install.ldFlags) } // pkgDir returns the file path to the given target package func (install *PleaseGoInstall) pkgDir(target string) string { p := strings.TrimPrefix(target, install.moduleName) return filepath.Join(install.srcRoot, p) } func (install *PleaseGoInstall) parseImportConfig() error { install.compiledPackages = map[string]string{ "unsafe": "", // Not sure how many other packages like this I need to handle "C": "", // Pseudo-package for cgo symbols } if install.importConfig != "" { f, err := os.Open(install.importConfig) if err != nil { return fmt.Errorf("failed to open import config: %w", err) } defer f.Close() importCfg := bufio.NewScanner(f) for importCfg.Scan() { line := importCfg.Text() parts := strings.Split(strings.TrimPrefix(line, "packagefile "), "=") install.compiledPackages[parts[0]] = parts[1] } } return nil } func checkCycle(path []string, next string) ([]string, error) { for i, p := range path { if p == next { return nil, fmt.Errorf("package cycle detected: \n%s", strings.Join(append(path[i:], next), "\n ->")) } } return append(path, next), nil } func (install *PleaseGoInstall) importDir(target string) (*build.Package, error) { pkgDir := install.pkgDir(target) // The package name can differ from the directory it lives in, in which case the parent directory is the one we want if _, err := os.Lstat(pkgDir); os.IsNotExist(err) { pkgDir = filepath.Dir(pkgDir) } return install.buildContext.ImportDir(pkgDir, build.ImportComment) } func (install *PleaseGoInstall) compile(from []string, target string) error { if _, done := install.compiledPackages[target]; done { return nil } fmt.Fprintf(os.Stderr, "Compiling package %s from %v\n", target, from) from, err := checkCycle(from, target) if err != nil { return err } pkg, err := install.importDir(target) if err != nil { return err } for _, i := range pkg.Imports { err := install.compile(from, i) if err != nil { if strings.Contains(err.Error(), "cannot find package") { // Go will fail to find this import and provide a much better message than we can continue } return err } } err = install.compilePackage(target, pkg) if err != nil { return err } return nil } func (install *PleaseGoInstall) prepWorkdir(pkg *build.Package, workDir, out string) error { allSrcs := append(append(pkg.CFiles, pkg.GoFiles...), pkg.HFiles...) 
if err := install.tc.Exec.Run("mkdir -p %s", workDir); err != nil { return err } if err := install.tc.Exec.Run("mkdir -p %s", filepath.Dir(out)); err != nil { return err } return install.tc.Exec.Run("ln %s %s", toolchain.FullPaths(allSrcs, pkg.Dir), workDir) } // outPath returns the path to the .a for a given package. Unlike go build, please_go install will always output to // the same location regardless of if the package matches the package dir base e.g. example.com/foo will always produce // example.com/foo/foo.a no matter what the package under there is named. // // We can get away with this because we don't compile tests so there must be exactly one package per directory. func outPath(outDir, target string) string { dirName := filepath.Base(target) return filepath.Join(outDir, filepath.Dir(target), dirName, dirName+".a") } func (install *PleaseGoInstall) compilePackage(target string, pkg *build.Package) error { if len(pkg.GoFiles)+len(pkg.CgoFiles) == 0 { return nil } out := outPath(install.outDir, target) workDir := fmt.Sprintf("_build/%s", target) if err := install.prepWorkdir(pkg, workDir, out); err != nil { return fmt.Errorf("failed to prepare working directory for %s: %w", target, err) } goFiles := pkg.GoFiles var objFiles []string ldFlags := pkg.CgoLDFLAGS if len(pkg.CgoFiles) > 0 { cFlags := pkg.CgoCFLAGS if len(pkg.CgoPkgConfig) > 0 { pkgConfCFlags, err := install.tc.PkgConfigCFlags(pkg.CgoPkgConfig) if err != nil { return err } cFlags = append(cFlags, pkgConfCFlags...) pkgConfLDFlags, err := install.tc.PkgConfigLDFlags(pkg.CgoPkgConfig) if err != nil { return err } ldFlags = append(ldFlags, pkgConfLDFlags...) if len(pkgConfLDFlags) > 0 { fmt.Fprintf(os.Stderr, "------ ***** ------ ld flags for %s: %s\n", target, strings.Join(pkgConfLDFlags, " ")) } } cFiles := pkg.CFiles cgoGoFiles, cgoCFiles, err := install.tc.CGO(pkg.Dir, workDir, cFlags, pkg.CgoFiles) if err != nil { return err } goFiles = append(goFiles, cgoGoFiles...) cFiles = append(cFiles, cgoCFiles...) cObjFiles, err := install.tc.CCompile(workDir, cFiles, cFlags) if err != nil { return err } objFiles = append(objFiles, cObjFiles...) } if len(pkg.SFiles) > 0 { asmH, symabis, err := install.tc.Symabis(pkg.Dir, workDir, pkg.SFiles) if err != nil { return err } if err := install.tc.GoAsmCompile(workDir, install.importConfig, out, install.trimPath, goFiles, asmH, symabis); err != nil { return err } asmObjFiles, err := install.tc.Asm(pkg.Dir, workDir, install.trimPath, pkg.SFiles) if err != nil { return err } objFiles = append(objFiles, asmObjFiles...) } else { err := install.tc.GoCompile(workDir, install.importConfig, out, install.trimPath, goFiles) if err != nil { return err } } if len(objFiles) > 0 { err := install.tc.Pack(workDir, out, objFiles) if err != nil { return err } } if err := install.tc.Exec.Run("echo \"packagefile %s=%s\" >> %s", target, out, install.importConfig); err != nil { return err } for _, f := range ldFlags { install.collectedLdFlags[f] = struct{}{} } install.compiledPackages[target] = out return nil }
1
10,112
Feels like this should probably come into this tool as a flag rather than just grabbing an env var internally?
thought-machine-please
go
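For the record above, a minimal sketch of what the reviewer is suggesting: plumb the C compiler flags into the tool as an explicit option instead of reading the CFLAGS environment variable inside compilePackage. The extraCFlags field, the cFlags constructor parameter, and the --cflags option mentioned below are assumptions made purely for illustration; they are not part of the real please_go API.

package install

import "strings"

// Sketch only: a hypothetical way to accept C compiler flags explicitly
// rather than calling os.Getenv("CFLAGS") inside compilePackage. The field
// and parameter names are illustrative assumptions, not the tool's real API.
type PleaseGoInstall struct {
	extraCFlags []string // would be populated from a hypothetical --cflags command-line option
	// ... existing fields elided ...
}

// New would accept the flags alongside the existing constructor arguments.
func New(cFlags string) *PleaseGoInstall {
	return &PleaseGoInstall{
		// Split a string like "-I/opt/include -O2" into separate flag tokens.
		extraCFlags: strings.Fields(cFlags),
	}
}

compilePackage would then do something like cFlags = append(cFlags, install.extraCFlags...) in place of the env-var lookup, which keeps the build inputs visible on the command line rather than hidden in the process environment.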
@@ -1656,7 +1656,7 @@ class TargetLocator { window(nameOrHandle) { return this.driver_.schedule( new command.Command(command.Name.SWITCH_TO_WINDOW). - setParameter('name', nameOrHandle), + setParameter('handle', nameOrHandle), 'WebDriver.switchTo().window(' + nameOrHandle + ')'); }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview The heart of the WebDriver JavaScript API. */ 'use strict'; const actions = require('./actions'); const by = require('./by'); const Capabilities = require('./capabilities').Capabilities; const command = require('./command'); const error = require('./error'); const input = require('./input'); const logging = require('./logging'); const Session = require('./session').Session; const Symbols = require('./symbols'); const promise = require('./promise'); /** * Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait * command}. * * @template OUT */ class Condition { /** * @param {string} message A descriptive error message. Should complete the * sentence "Waiting [...]" * @param {function(!WebDriver): OUT} fn The condition function to * evaluate on each iteration of the wait loop. */ constructor(message, fn) { /** @private {string} */ this.description_ = 'Waiting ' + message; /** @type {function(!WebDriver): OUT} */ this.fn = fn; } /** @return {string} A description of this condition. */ description() { return this.description_; } } /** * Defines a condition that will result in a {@link WebElement}. * * @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>} */ class WebElementCondition extends Condition { /** * @param {string} message A descriptive error message. Should complete the * sentence "Waiting [...]" * @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)} * fn The condition function to evaluate on each iteration of the wait * loop. */ constructor(message, fn) { super(message, fn); } } ////////////////////////////////////////////////////////////////////////////// // // WebDriver // ////////////////////////////////////////////////////////////////////////////// /** * Translates a command to its wire-protocol representation before passing it * to the given `executor` for execution. * @param {!command.Executor} executor The executor to use. * @param {!command.Command} command The command to execute. * @return {!Promise} A promise that will resolve with the command response. */ function executeCommand(executor, command) { return toWireValue(command.getParameters()). then(function(parameters) { command.setParameters(parameters); return executor.execute(command); }); } /** * Converts an object to its JSON representation in the WebDriver wire protocol. 
* When converting values of type object, the following steps will be taken: * <ol> * <li>if the object is a WebElement, the return value will be the element's * server ID * <li>if the object defines a {@link Symbols.serialize} method, this algorithm * will be recursively applied to the object's serialized representation * <li>if the object provides a "toJSON" function, this algorithm will * recursively be applied to the result of that function * <li>otherwise, the value of each key will be recursively converted according * to the rules above. * </ol> * * @param {*} obj The object to convert. * @return {!Promise<?>} A promise that will resolve to the input value's JSON * representation. */ function toWireValue(obj) { if (promise.isPromise(obj)) { return Promise.resolve(obj).then(toWireValue); } return Promise.resolve(convertValue(obj)); } function convertValue(value) { if (value === void 0 || value === null) { return value; } if (typeof value === 'boolean' || typeof value === 'number' || typeof value === 'string') { return value; } if (Array.isArray(value)) { return convertKeys(value); } if (typeof value === 'function') { return '' + value; } if (typeof value[Symbols.serialize] === 'function') { return toWireValue(value[Symbols.serialize]()); } else if (typeof value.toJSON === 'function') { return toWireValue(value.toJSON()); } return convertKeys(value); } function convertKeys(obj) { const isArray = Array.isArray(obj); const numKeys = isArray ? obj.length : Object.keys(obj).length; const ret = isArray ? new Array(numKeys) : {}; if (!numKeys) { return Promise.resolve(ret); } let numResolved = 0; function forEachKey(obj, fn) { if (Array.isArray(obj)) { for (let i = 0, n = obj.length; i < n; i++) { fn(obj[i], i); } } else { for (let key in obj) { fn(obj[key], key); } } } return new Promise(function(done, reject) { forEachKey(obj, function(value, key) { if (promise.isPromise(value)) { value.then(toWireValue).then(setValue, reject); } else { value = convertValue(value); if (promise.isPromise(value)) { value.then(toWireValue).then(setValue, reject); } else { setValue(value); } } function setValue(value) { ret[key] = value; maybeFulfill(); } }); function maybeFulfill() { if (++numResolved === numKeys) { done(ret); } } }); } /** * Converts a value from its JSON representation according to the WebDriver wire * protocol. Any JSON object that defines a WebElement ID will be decoded to a * {@link WebElement} object. All other values will be passed through as is. * * @param {!WebDriver} driver The driver to use as the parent of any unwrapped * {@link WebElement} values. * @param {*} value The value to convert. * @return {*} The converted value. */ function fromWireValue(driver, value) { if (Array.isArray(value)) { value = value.map(v => fromWireValue(driver, v)); } else if (WebElement.isId(value)) { let id = WebElement.extractId(value); value = new WebElement(driver, id); } else if (value && typeof value === 'object') { let result = {}; for (let key in value) { if (value.hasOwnProperty(key)) { result[key] = fromWireValue(driver, value[key]); } } value = result; } return value; } /** * Creates a new WebDriver client, which provides control over a browser. * * Every command.Command returns a {@link promise.Promise} that * represents the result of that command. Callbacks may be registered on this * object to manipulate the command result or catch an expected error. Any * commands scheduled with a callback are considered sub-commands and will * execute before the next command in the current frame. 
For example: * * var message = []; * driver.call(message.push, message, 'a').then(function() { * driver.call(message.push, message, 'b'); * }); * driver.call(message.push, message, 'c'); * driver.call(function() { * alert('message is abc? ' + (message.join('') == 'abc')); * }); * */ class WebDriver { /** * @param {!(Session|promise.Promise<!Session>)} session Either a * known session or a promise that will be resolved to a session. * @param {!command.Executor} executor The executor to use when sending * commands to the browser. * @param {promise.ControlFlow=} opt_flow The flow to * schedule commands through. Defaults to the active flow object. */ constructor(session, executor, opt_flow) { /** @private {!promise.Promise<!Session>} */ this.session_ = promise.fulfilled(session); /** @private {!command.Executor} */ this.executor_ = executor; /** @private {!promise.ControlFlow} */ this.flow_ = opt_flow || promise.controlFlow(); /** @private {input.FileDetector} */ this.fileDetector_ = null; } /** * Creates a new WebDriver client for an existing session. * @param {!command.Executor} executor Command executor to use when querying * for session details. * @param {string} sessionId ID of the session to attach to. * @param {promise.ControlFlow=} opt_flow The control flow all * driver commands should execute under. Defaults to the * {@link promise.controlFlow() currently active} control flow. * @return {!WebDriver} A new client for the specified session. */ static attachToSession(executor, sessionId, opt_flow) { let flow = opt_flow || promise.controlFlow(); let cmd = new command.Command(command.Name.DESCRIBE_SESSION) .setParameter('sessionId', sessionId); let session = flow.execute( () => executeCommand(executor, cmd).catch(err => { // The DESCRIBE_SESSION command is not supported by the W3C spec, so // if we get back an unknown command, just return a session with // unknown capabilities. if (err instanceof error.UnknownCommandError) { return new Session(sessionId, new Capabilities); } throw err; }), 'WebDriver.attachToSession()'); return new WebDriver(session, executor, flow); } /** * Creates a new WebDriver session. * * By default, the requested session `capabilities` are merely "desired" and * the remote end will still create a new session even if it cannot satisfy * all of the requested capabilities. You can query which capabilities a * session actually has using the * {@linkplain #getCapabilities() getCapabilities()} method on the returned * WebDriver instance. * * To define _required capabilities_, provide the `capabilities` as an object * literal with `required` and `desired` keys. The `desired` key may be * omitted if all capabilities are required, and vice versa. If the server * cannot create a session with all of the required capabilities, it will * return an {@linkplain error.SessionNotCreatedError}. * * let required = new Capabilities().set('browserName', 'firefox'); * let desired = new Capabilities().set('version', '45'); * let driver = WebDriver.createSession(executor, {required, desired}); * * This function will always return a WebDriver instance. If there is an error * creating the session, such as the aforementioned SessionNotCreatedError, * the driver will have a rejected {@linkplain #getSession session} promise. * It is recommended that this promise is left _unhandled_ so it will * propagate through the {@linkplain promise.ControlFlow control flow} and * cause subsequent commands to fail. 
* * let required = Capabilities.firefox(); * let driver = WebDriver.createSession(executor, {required}); * * // If the createSession operation failed, then this command will also * // also fail, propagating the creation failure. * driver.get('http://www.google.com').catch(e => console.log(e)); * * @param {!command.Executor} executor The executor to create the new session * with. * @param {(!Capabilities| * {desired: (Capabilities|undefined), * required: (Capabilities|undefined)})} capabilities The desired * capabilities for the new session. * @param {promise.ControlFlow=} opt_flow The control flow all driver * commands should execute under, including the initial session creation. * Defaults to the {@link promise.controlFlow() currently active} * control flow. * @return {!WebDriver} The driver for the newly created session. */ static createSession(executor, capabilities, opt_flow) { let flow = opt_flow || promise.controlFlow(); let cmd = new command.Command(command.Name.NEW_SESSION); if (capabilities && (capabilities.desired || capabilities.required)) { cmd.setParameter('desiredCapabilities', capabilities.desired); cmd.setParameter('requiredCapabilities', capabilities.required); } else { cmd.setParameter('desiredCapabilities', capabilities); } let session = flow.execute( () => executeCommand(executor, cmd), 'WebDriver.createSession()'); return new WebDriver(session, executor, flow); } /** * @return {!promise.ControlFlow} The control flow used by this * instance. */ controlFlow() { return this.flow_; } /** * Schedules a {@link command.Command} to be executed by this driver's * {@link command.Executor}. * * @param {!command.Command} command The command to schedule. * @param {string} description A description of the command for debugging. * @return {!promise.Promise<T>} A promise that will be resolved * with the command result. * @template T */ schedule(command, description) { var self = this; checkHasNotQuit(); command.setParameter('sessionId', this.session_); // If any of the command parameters are rejected promises, those // rejections may be reported as unhandled before the control flow // attempts to execute the command. To ensure parameters errors // propagate through the command itself, we resolve all of the // command parameters now, but suppress any errors until the ControlFlow // actually executes the command. This addresses scenarios like catching // an element not found error in: // // driver.findElement(By.id('foo')).click().catch(function(e) { // if (e instanceof NoSuchElementError) { // // Do something. // } // }); var prepCommand = toWireValue(command.getParameters()); prepCommand.catch(function() {}); var flow = this.flow_; var executor = this.executor_; return flow.execute(function() { // A call to WebDriver.quit() may have been scheduled in the same event // loop as this |command|, which would prevent us from detecting that the // driver has quit above. Therefore, we need to make another quick check. // We still check above so we can fail as early as possible. checkHasNotQuit(); // Retrieve resolved command parameters; any previously suppressed errors // will now propagate up through the control flow as part of the command // execution. 
return prepCommand.then(function(parameters) { command.setParameters(parameters); return executor.execute(command); }).then(value => fromWireValue(self, value)); }, description); function checkHasNotQuit() { if (!self.session_) { throw new error.NoSuchSessionError( 'This driver instance does not have a valid session ID ' + '(did you call WebDriver.quit()?) and may no longer be ' + 'used.'); } } } /** * Sets the {@linkplain input.FileDetector file detector} that should be * used with this instance. * @param {input.FileDetector} detector The detector to use or {@code null}. */ setFileDetector(detector) { this.fileDetector_ = detector; } /** * @return {!command.Executor} The command executor used by this instance. */ getExecutor() { return this.executor_; } /** * @return {!promise.Promise<!Session>} A promise for this client's * session. */ getSession() { return this.session_; } /** * @return {!promise.Promise<!Capabilities>} A promise * that will resolve with the this instance's capabilities. */ getCapabilities() { return this.session_.then(session => session.getCapabilities()); } /** * Schedules a command to quit the current session. After calling quit, this * instance will be invalidated and may no longer be used to issue commands * against the browser. * @return {!promise.Promise<void>} A promise that will be resolved * when the command has completed. */ quit() { var result = this.schedule( new command.Command(command.Name.QUIT), 'WebDriver.quit()'); // Delete our session ID when the quit command finishes; this will allow us // to throw an error when attemnpting to use a driver post-quit. return result.finally(() => delete this.session_); } /** * Creates a new action sequence using this driver. The sequence will not be * scheduled for execution until {@link actions.ActionSequence#perform} is * called. Example: * * driver.actions(). * mouseDown(element1). * mouseMove(element2). * mouseUp(). * perform(); * * @return {!actions.ActionSequence} A new action sequence for this instance. */ actions() { return new actions.ActionSequence(this); } /** * Creates a new touch sequence using this driver. The sequence will not be * scheduled for execution until {@link actions.TouchSequence#perform} is * called. Example: * * driver.touchActions(). * tap(element1). * doubleTap(element2). * perform(); * * @return {!actions.TouchSequence} A new touch sequence for this instance. */ touchActions() { return new actions.TouchSequence(this); } /** * Schedules a command to execute JavaScript in the context of the currently * selected frame or window. The script fragment will be executed as the body * of an anonymous function. If the script is provided as a function object, * that function will be converted to a string for injection into the target * window. * * Any arguments provided in addition to the script will be included as script * arguments and may be referenced using the {@code arguments} object. * Arguments may be a boolean, number, string, or {@linkplain WebElement}. * Arrays and objects may also be used as script arguments as long as each item * adheres to the types previously mentioned. * * The script may refer to any variables accessible from the current window. * Furthermore, the script will execute in the window's context, thus * {@code document} may be used to refer to the current document. Any local * variables will not be available once the script has finished executing, * though global variables will persist. * * If the script has a return value (i.e. 
if the script contains a return * statement), then the following steps will be taken for resolving this * functions return value: * * - For a HTML element, the value will resolve to a {@linkplain WebElement} * - Null and undefined return values will resolve to null</li> * - Booleans, numbers, and strings will resolve as is</li> * - Functions will resolve to their string representation</li> * - For arrays and objects, each member item will be converted according to * the rules above * * @param {!(string|Function)} script The script to execute. * @param {...*} var_args The arguments to pass to the script. * @return {!promise.Promise<T>} A promise that will resolve to the * scripts return value. * @template T */ executeScript(script, var_args) { if (typeof script === 'function') { script = 'return (' + script + ').apply(null, arguments);'; } let args = arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : []; return this.schedule( new command.Command(command.Name.EXECUTE_SCRIPT). setParameter('script', script). setParameter('args', args), 'WebDriver.executeScript()'); } /** * Schedules a command to execute asynchronous JavaScript in the context of the * currently selected frame or window. The script fragment will be executed as * the body of an anonymous function. If the script is provided as a function * object, that function will be converted to a string for injection into the * target window. * * Any arguments provided in addition to the script will be included as script * arguments and may be referenced using the {@code arguments} object. * Arguments may be a boolean, number, string, or {@code WebElement}. * Arrays and objects may also be used as script arguments as long as each item * adheres to the types previously mentioned. * * Unlike executing synchronous JavaScript with {@link #executeScript}, * scripts executed with this function must explicitly signal they are finished * by invoking the provided callback. This callback will always be injected * into the executed function as the last argument, and thus may be referenced * with {@code arguments[arguments.length - 1]}. The following steps will be * taken for resolving this functions return value against the first argument * to the script's callback function: * * - For a HTML element, the value will resolve to a * {@link WebElement} * - Null and undefined return values will resolve to null * - Booleans, numbers, and strings will resolve as is * - Functions will resolve to their string representation * - For arrays and objects, each member item will be converted according to * the rules above * * __Example #1:__ Performing a sleep that is synchronized with the currently * selected window: * * var start = new Date().getTime(); * driver.executeAsyncScript( * 'window.setTimeout(arguments[arguments.length - 1], 500);'). * then(function() { * console.log( * 'Elapsed time: ' + (new Date().getTime() - start) + ' ms'); * }); * * __Example #2:__ Synchronizing a test with an AJAX application: * * var button = driver.findElement(By.id('compose-button')); * button.click(); * driver.executeAsyncScript( * 'var callback = arguments[arguments.length - 1];' + * 'mailClient.getComposeWindowWidget().onload(callback);'); * driver.switchTo().frame('composeWidget'); * driver.findElement(By.id('to')).sendKeys('dog@example.com'); * * __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In * this example, the inject script is specified with a function literal. 
When * using this format, the function is converted to a string for injection, so it * should not reference any symbols not defined in the scope of the page under * test. * * driver.executeAsyncScript(function() { * var callback = arguments[arguments.length - 1]; * var xhr = new XMLHttpRequest(); * xhr.open("GET", "/resource/data.json", true); * xhr.onreadystatechange = function() { * if (xhr.readyState == 4) { * callback(xhr.responseText); * } * }; * xhr.send(''); * }).then(function(str) { * console.log(JSON.parse(str)['food']); * }); * * @param {!(string|Function)} script The script to execute. * @param {...*} var_args The arguments to pass to the script. * @return {!promise.Promise<T>} A promise that will resolve to the * scripts return value. * @template T */ executeAsyncScript(script, var_args) { if (typeof script === 'function') { script = 'return (' + script + ').apply(null, arguments);'; } let args = Array.prototype.slice.call(arguments, 1); return this.schedule( new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT). setParameter('script', script). setParameter('args', args), 'WebDriver.executeScript()'); } /** * Schedules a command to execute a custom function. * @param {function(...): (T|promise.Promise<T>)} fn The function to * execute. * @param {Object=} opt_scope The object in whose scope to execute the function. * @param {...*} var_args Any arguments to pass to the function. * @return {!promise.Promise<T>} A promise that will be resolved' * with the function's result. * @template T */ call(fn, opt_scope, var_args) { let args = Array.prototype.slice.call(arguments, 2); let flow = this.flow_; return flow.execute(function() { return promise.fullyResolved(args).then(function(args) { if (promise.isGenerator(fn)) { args.unshift(fn, opt_scope); return promise.consume.apply(null, args); } return fn.apply(opt_scope, args); }); }, 'WebDriver.call(' + (fn.name || 'function') + ')'); } /** * Schedules a command to wait for a condition to hold. The condition may be * specified by a {@link Condition}, as a custom function, or as any * promise-like thenable. * * For a {@link Condition} or function, the wait will repeatedly * evaluate the condition until it returns a truthy value. If any errors occur * while evaluating the condition, they will be allowed to propagate. In the * event a condition returns a {@link promise.Promise promise}, the polling * loop will wait for it to be resolved and use the resolved value for whether * the condition has been satisified. Note the resolution time for a promise * is factored into whether a wait has timed out. * * Note, if the provided condition is a {@link WebElementCondition}, then * the wait will return a {@link WebElementPromise} that will resolve to the * element that satisified the condition. * * _Example:_ waiting up to 10 seconds for an element to be present on the * page. * * var button = driver.wait(until.elementLocated(By.id('foo')), 10000); * button.click(); * * This function may also be used to block the command flow on the resolution * of any thenable promise object. When given a promise, the command will * simply wait for its resolution before completing. A timeout may be provided * to fail the command if the promise does not resolve before the timeout * expires. * * _Example:_ Suppose you have a function, `startTestServer`, that returns a * promise for when a server is ready for requests. 
You can block a WebDriver * client on this promise with: * * var started = startTestServer(); * driver.wait(started, 5 * 1000, 'Server should start within 5 seconds'); * driver.get(getServerUrl()); * * @param {!(promise.Promise<T>| * Condition<T>| * function(!WebDriver): T)} condition The condition to * wait on, defined as a promise, condition object, or a function to * evaluate as a condition. * @param {number=} opt_timeout How long to wait for the condition to be true. * @param {string=} opt_message An optional message to use if the wait times * out. * @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be * resolved with the first truthy value returned by the condition * function, or rejected if the condition times out. If the input * input condition is an instance of a {@link WebElementCondition}, * the returned value will be a {@link WebElementPromise}. * @template T */ wait(condition, opt_timeout, opt_message) { if (promise.isPromise(condition)) { return this.flow_.wait( /** @type {!promise.Promise} */(condition), opt_timeout, opt_message); } var message = opt_message; var fn = /** @type {!Function} */(condition); if (condition instanceof Condition) { message = message || condition.description(); fn = condition.fn; } var driver = this; var result = this.flow_.wait(function() { if (promise.isGenerator(fn)) { return promise.consume(fn, null, [driver]); } return fn(driver); }, opt_timeout, message); if (condition instanceof WebElementCondition) { result = new WebElementPromise(this, result.then(function(value) { if (!(value instanceof WebElement)) { throw TypeError( 'WebElementCondition did not resolve to a WebElement: ' + Object.prototype.toString.call(value)); } return value; })); } return result; } /** * Schedules a command to make the driver sleep for the given amount of time. * @param {number} ms The amount of time, in milliseconds, to sleep. * @return {!promise.Promise<void>} A promise that will be resolved * when the sleep has finished. */ sleep(ms) { return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')'); } /** * Schedules a command to retrieve the current window handle. * @return {!promise.Promise<string>} A promise that will be * resolved with the current window handle. */ getWindowHandle() { return this.schedule( new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE), 'WebDriver.getWindowHandle()'); } /** * Schedules a command to retrieve the current list of available window handles. * @return {!promise.Promise.<!Array<string>>} A promise that will * be resolved with an array of window handles. */ getAllWindowHandles() { return this.schedule( new command.Command(command.Name.GET_WINDOW_HANDLES), 'WebDriver.getAllWindowHandles()'); } /** * Schedules a command to retrieve the current page's source. The page source * returned is a representation of the underlying DOM: do not expect it to be * formatted or escaped in the same way as the response sent from the web * server. * @return {!promise.Promise<string>} A promise that will be * resolved with the current page source. */ getPageSource() { return this.schedule( new command.Command(command.Name.GET_PAGE_SOURCE), 'WebDriver.getPageSource()'); } /** * Schedules a command to close the current window. * @return {!promise.Promise<void>} A promise that will be resolved * when this command has completed. */ close() { return this.schedule(new command.Command(command.Name.CLOSE), 'WebDriver.close()'); } /** * Schedules a command to navigate to the given URL. 
* @param {string} url The fully qualified URL to open. * @return {!promise.Promise<void>} A promise that will be resolved * when the document has finished loading. */ get(url) { return this.navigate().to(url); } /** * Schedules a command to retrieve the URL of the current page. * @return {!promise.Promise<string>} A promise that will be * resolved with the current URL. */ getCurrentUrl() { return this.schedule( new command.Command(command.Name.GET_CURRENT_URL), 'WebDriver.getCurrentUrl()'); } /** * Schedules a command to retrieve the current page's title. * @return {!promise.Promise<string>} A promise that will be * resolved with the current page's title. */ getTitle() { return this.schedule(new command.Command(command.Name.GET_TITLE), 'WebDriver.getTitle()'); } /** * Schedule a command to find an element on the page. If the element cannot be * found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned * by the driver. Unlike other commands, this error cannot be suppressed. In * other words, scheduling a command to find an element doubles as an assert * that the element is present on the page. To test whether an element is * present on the page, use {@link #isElementPresent} instead. * * The search criteria for an element may be defined using one of the * factories in the {@link webdriver.By} namespace, or as a short-hand * {@link webdriver.By.Hash} object. For example, the following two statements * are equivalent: * * var e1 = driver.findElement(By.id('foo')); * var e2 = driver.findElement({id:'foo'}); * * You may also provide a custom locator function, which takes as input this * instance and returns a {@link WebElement}, or a promise that will resolve * to a WebElement. If the returned promise resolves to an array of * WebElements, WebDriver will use the first element. For example, to find the * first visible link on a page, you could write: * * var link = driver.findElement(firstVisibleLink); * * function firstVisibleLink(driver) { * var links = driver.findElements(By.tagName('a')); * return promise.filter(links, function(link) { * return link.isDisplayed(); * }); * } * * @param {!(by.By|Function)} locator The locator to use. * @return {!WebElementPromise} A WebElement that can be used to issue * commands against the located element. If the element is not found, the * element will be invalidated and all scheduled commands aborted. */ findElement(locator) { let id; locator = by.checkedLocator(locator); if (typeof locator === 'function') { id = this.findElementInternal_(locator, this); } else { let cmd = new command.Command(command.Name.FIND_ELEMENT). setParameter('using', locator.using). setParameter('value', locator.value); id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')'); } return new WebElementPromise(this, id); } /** * @param {!Function} locatorFn The locator function to use. * @param {!(WebDriver|WebElement)} context The search * context. * @return {!promise.Promise.<!WebElement>} A * promise that will resolve to a list of WebElements. * @private */ findElementInternal_(locatorFn, context) { return this.call(() => locatorFn(context)).then(function(result) { if (Array.isArray(result)) { result = result[0]; } if (!(result instanceof WebElement)) { throw new TypeError('Custom locator did not return a WebElement'); } return result; }); } /** * Schedule a command to search for multiple elements on the page. * * @param {!(by.By|Function)} locator The locator to use. 
* @return {!promise.Promise.<!Array.<!WebElement>>} A * promise that will resolve to an array of WebElements. */ findElements(locator) { locator = by.checkedLocator(locator); if (typeof locator === 'function') { return this.findElementsInternal_(locator, this); } else { let cmd = new command.Command(command.Name.FIND_ELEMENTS). setParameter('using', locator.using). setParameter('value', locator.value); let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')'); return res.catch(function(e) { if (e instanceof error.NoSuchElementError) { return []; } throw e; }); } } /** * @param {!Function} locatorFn The locator function to use. * @param {!(WebDriver|WebElement)} context The search context. * @return {!promise.Promise<!Array<!WebElement>>} A promise that * will resolve to an array of WebElements. * @private */ findElementsInternal_(locatorFn, context) { return this.call(() => locatorFn(context)).then(function(result) { if (result instanceof WebElement) { return [result]; } if (!Array.isArray(result)) { return []; } return result.filter(function(item) { return item instanceof WebElement; }); }); } /** * Schedule a command to take a screenshot. The driver makes a best effort to * return a screenshot of the following, in order of preference: * * 1. Entire page * 2. Current window * 3. Visible portion of the current frame * 4. The entire display containing the browser * * @return {!promise.Promise<string>} A promise that will be * resolved to the screenshot as a base-64 encoded PNG. */ takeScreenshot() { return this.schedule(new command.Command(command.Name.SCREENSHOT), 'WebDriver.takeScreenshot()'); } /** * @return {!Options} The options interface for this instance. */ manage() { return new Options(this); } /** * @return {!Navigation} The navigation interface for this instance. */ navigate() { return new Navigation(this); } /** * @return {!TargetLocator} The target locator interface for this * instance. */ switchTo() { return new TargetLocator(this); } } /** * Interface for navigating back and forth in the browser history. * * This class should never be instantiated directly. Insead, obtain an instance * with * * webdriver.navigate() * * @see WebDriver#navigate() */ class Navigation { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Schedules a command to navigate to a new URL. * @param {string} url The URL to navigate to. * @return {!promise.Promise<void>} A promise that will be resolved * when the URL has been loaded. */ to(url) { return this.driver_.schedule( new command.Command(command.Name.GET). setParameter('url', url), 'WebDriver.navigate().to(' + url + ')'); } /** * Schedules a command to move backwards in the browser history. * @return {!promise.Promise<void>} A promise that will be resolved * when the navigation event has completed. */ back() { return this.driver_.schedule( new command.Command(command.Name.GO_BACK), 'WebDriver.navigate().back()'); } /** * Schedules a command to move forwards in the browser history. * @return {!promise.Promise<void>} A promise that will be resolved * when the navigation event has completed. */ forward() { return this.driver_.schedule( new command.Command(command.Name.GO_FORWARD), 'WebDriver.navigate().forward()'); } /** * Schedules a command to refresh the current page. * @return {!promise.Promise<void>} A promise that will be resolved * when the navigation event has completed. 
*/ refresh() { return this.driver_.schedule( new command.Command(command.Name.REFRESH), 'WebDriver.navigate().refresh()'); } } /** * Provides methods for managing browser and driver state. * * This class should never be instantiated directly. Insead, obtain an instance * with {@linkplain WebDriver#manage() webdriver.manage()}. */ class Options { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Schedules a command to add a cookie. * * __Sample Usage:__ * * // Set a basic cookie. * driver.options().addCookie({name: 'foo', value: 'bar'}); * * // Set a cookie that expires in 10 minutes. * let expiry = new Date(Date.now() + (10 * 60 * 1000)); * driver.options().addCookie({name: 'foo', value: 'bar', expiry}); * * // The cookie expiration may also be specified in seconds since epoch. * driver.options().addCookie({ * name: 'foo', * value: 'bar', * expiry: Math.floor(Date.now() / 1000) * }); * * @param {!Options.Cookie} spec Defines the cookie to add. * @return {!promise.Promise<void>} A promise that will be resolved * when the cookie has been added to the page. * @throws {error.InvalidArgumentError} if any of the cookie parameters are * invalid. * @throws {TypeError} if `spec` is not a cookie object. */ addCookie(spec) { if (!spec || typeof spec !== 'object') { throw TypeError('addCookie called with non-cookie parameter'); } // We do not allow '=' or ';' in the name. let name = spec.name; if (/[;=]/.test(name)) { throw new error.InvalidArgumentError( 'Invalid cookie name "' + name + '"'); } // We do not allow ';' in value. let value = spec.value; if (/;/.test(value)) { throw new error.InvalidArgumentError( 'Invalid cookie value "' + value + '"'); } let cookieString = name + '=' + value + (spec.domain ? ';domain=' + spec.domain : '') + (spec.path ? ';path=' + spec.path : '') + (spec.secure ? ';secure' : ''); let expiry; if (typeof spec.expiry === 'number') { expiry = Math.floor(spec.expiry); cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString(); } else if (spec.expiry instanceof Date) { let date = /** @type {!Date} */(spec.expiry); expiry = Math.floor(date.getTime() / 1000); cookieString += ';expires=' + date.toUTCString(); } return this.driver_.schedule( new command.Command(command.Name.ADD_COOKIE). setParameter('cookie', { 'name': name, 'value': value, 'path': spec.path, 'domain': spec.domain, 'secure': !!spec.secure, 'expiry': expiry }), 'WebDriver.manage().addCookie(' + cookieString + ')'); } /** * Schedules a command to delete all cookies visible to the current page. * @return {!promise.Promise<void>} A promise that will be resolved * when all cookies have been deleted. */ deleteAllCookies() { return this.driver_.schedule( new command.Command(command.Name.DELETE_ALL_COOKIES), 'WebDriver.manage().deleteAllCookies()'); } /** * Schedules a command to delete the cookie with the given name. This command * is a no-op if there is no cookie with the given name visible to the current * page. * @param {string} name The name of the cookie to delete. * @return {!promise.Promise<void>} A promise that will be resolved * when the cookie has been deleted. */ deleteCookie(name) { return this.driver_.schedule( new command.Command(command.Name.DELETE_COOKIE). setParameter('name', name), 'WebDriver.manage().deleteCookie(' + name + ')'); } /** * Schedules a command to retrieve all cookies visible to the current page. 
* Each cookie will be returned as a JSON object as described by the WebDriver * wire protocol. * @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be * resolved with the cookies visible to the current browsing context. */ getCookies() { return this.driver_.schedule( new command.Command(command.Name.GET_ALL_COOKIES), 'WebDriver.manage().getCookies()'); } /** * Schedules a command to retrieve the cookie with the given name. Returns null * if there is no such cookie. The cookie will be returned as a JSON object as * described by the WebDriver wire protocol. * * @param {string} name The name of the cookie to retrieve. * @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved * with the named cookie, or `null` if there is no such cookie. */ getCookie(name) { return this.getCookies().then(function(cookies) { for (let cookie of cookies) { if (cookie && cookie['name'] === name) { return cookie; } } return null; }); } /** * @return {!Logs} The interface for managing driver * logs. */ logs() { return new Logs(this.driver_); } /** * @return {!Timeouts} The interface for managing driver timeouts. */ timeouts() { return new Timeouts(this.driver_); } /** * @return {!Window} The interface for managing the current window. */ window() { return new Window(this.driver_); } } /** * A record object describing a browser cookie. * * @record */ Options.Cookie = function() {}; /** * The name of the cookie. * * @type {string} */ Options.Cookie.prototype.name; /** * The cookie value. * * @type {string} */ Options.Cookie.prototype.value; /** * The cookie path. Defaults to "/" when adding a cookie. * * @type {(string|undefined)} */ Options.Cookie.prototype.path; /** * The domain the cookie is visible to. Defaults to the current browsing * context's document's URL when adding a cookie. * * @type {(string|undefined)} */ Options.Cookie.prototype.domain; /** * Whether the cookie is a secure cookie. Defaults to false when adding a new * cookie. * * @type {(boolean|undefined)} */ Options.Cookie.prototype.secure; /** * Whether the cookie is an HTTP only cookie. Defaults to false when adding a * new cookie. * * @type {(boolean|undefined)} */ Options.Cookie.prototype.httpOnly; /** * When the cookie expires. * * When {@linkplain Options#addCookie() adding a cookie}, this may be specified * in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to * 20 years in the future if omitted. * * The expiry is always returned in seconds since epoch when * {@linkplain Options#getCookies() retrieving cookies} from the browser. * * @type {(!Date|number|undefined)} */ Options.Cookie.prototype.expiry; /** * An interface for managing timeout behavior for WebDriver instances. * * This class should never be instantiated directly. Insead, obtain an instance * with * * webdriver.manage().timeouts() * * @see WebDriver#manage() * @see Options#timeouts() */ class Timeouts { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Specifies the amount of time the driver should wait when searching for an * element if it is not immediately present. * * When searching for a single element, the driver should poll the page * until the element has been found, or this timeout expires before failing * with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching * for multiple elements, the driver should poll the page until at least one * element has been found or this timeout has expired. 
* * Setting the wait timeout to 0 (its default value), disables implicit * waiting. * * Increasing the implicit wait timeout should be used judiciously as it * will have an adverse effect on test run time, especially when used with * slower location strategies like XPath. * * @param {number} ms The amount of time to wait, in milliseconds. * @return {!promise.Promise<void>} A promise that will be resolved * when the implicit wait timeout has been set. */ implicitlyWait(ms) { return this._scheduleCommand(ms, 'implicit', 'implicitlyWait'); } /** * Sets the amount of time to wait, in milliseconds, for an asynchronous * script to finish execution before returning an error. If the timeout is * less than or equal to 0, the script will be allowed to run indefinitely. * * @param {number} ms The amount of time to wait, in milliseconds. * @return {!promise.Promise<void>} A promise that will be resolved * when the script timeout has been set. */ setScriptTimeout(ms) { return this._scheduleCommand(ms, 'script', 'setScriptTimeout'); } /** * Sets the amount of time to wait for a page load to complete before * returning an error. If the timeout is negative, page loads may be * indefinite. * * @param {number} ms The amount of time to wait, in milliseconds. * @return {!promise.Promise<void>} A promise that will be resolved * when the timeout has been set. */ pageLoadTimeout(ms) { return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout'); } _scheduleCommand(ms, timeoutIdentifier, timeoutName) { return this.driver_.schedule( new command.Command(command.Name.SET_TIMEOUT). setParameter('type', timeoutIdentifier). setParameter('ms', ms), `WebDriver.manage().timeouts().${timeoutName}(${ms})`); } } /** * An interface for managing the current window. * * This class should never be instantiated directly. Insead, obtain an instance * with * * webdriver.manage().window() * * @see WebDriver#manage() * @see Options#window() */ class Window { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Retrieves the window's current position, relative to the top left corner of * the screen. * @return {!promise.Promise.<{x: number, y: number}>} A promise * that will be resolved with the window's position in the form of a * {x:number, y:number} object literal. */ getPosition() { return this.driver_.schedule( new command.Command(command.Name.GET_WINDOW_POSITION). setParameter('windowHandle', 'current'), 'WebDriver.manage().window().getPosition()'); } /** * Repositions the current window. * @param {number} x The desired horizontal position, relative to the left * side of the screen. * @param {number} y The desired vertical position, relative to the top of the * of the screen. * @return {!promise.Promise<void>} A promise that will be resolved * when the command has completed. */ setPosition(x, y) { return this.driver_.schedule( new command.Command(command.Name.SET_WINDOW_POSITION). setParameter('windowHandle', 'current'). setParameter('x', x). setParameter('y', y), 'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')'); } /** * Retrieves the window's current size. * @return {!promise.Promise<{width: number, height: number}>} A * promise that will be resolved with the window's size in the form of a * {width:number, height:number} object literal. */ getSize() { return this.driver_.schedule( new command.Command(command.Name.GET_WINDOW_SIZE). 
setParameter('windowHandle', 'current'), 'WebDriver.manage().window().getSize()'); } /** * Resizes the current window. * @param {number} width The desired window width. * @param {number} height The desired window height. * @return {!promise.Promise<void>} A promise that will be resolved * when the command has completed. */ setSize(width, height) { return this.driver_.schedule( new command.Command(command.Name.SET_WINDOW_SIZE). setParameter('windowHandle', 'current'). setParameter('width', width). setParameter('height', height), 'WebDriver.manage().window().setSize(' + width + ', ' + height + ')'); } /** * Maximizes the current window. * @return {!promise.Promise<void>} A promise that will be resolved * when the command has completed. */ maximize() { return this.driver_.schedule( new command.Command(command.Name.MAXIMIZE_WINDOW). setParameter('windowHandle', 'current'), 'WebDriver.manage().window().maximize()'); } } /** * Interface for managing WebDriver log records. * * This class should never be instantiated directly. Instead, obtain an * instance with * * webdriver.manage().logs() * * @see WebDriver#manage() * @see Options#logs() */ class Logs { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Fetches available log entries for the given type. * * Note that log buffers are reset after each call, meaning that available * log entries correspond to those entries not yet returned for a given log * type. In practice, this means that this call will return the available log * entries since the last call, or from the start of the session. * * @param {!logging.Type} type The desired log type. * @return {!promise.Promise.<!Array.<!logging.Entry>>} A * promise that will resolve to a list of log entries for the specified * type. */ get(type) { let cmd = new command.Command(command.Name.GET_LOG). setParameter('type', type); return this.driver_.schedule( cmd, 'WebDriver.manage().logs().get(' + type + ')'). then(function(entries) { return entries.map(function(entry) { if (!(entry instanceof logging.Entry)) { return new logging.Entry( entry['level'], entry['message'], entry['timestamp'], entry['type']); } return entry; }); }); } /** * Retrieves the log types available to this driver. * @return {!promise.Promise<!Array<!logging.Type>>} A * promise that will resolve to a list of available log types. */ getAvailableLogTypes() { return this.driver_.schedule( new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES), 'WebDriver.manage().logs().getAvailableLogTypes()'); } } /** * An interface for changing the focus of the driver to another frame or window. * * This class should never be instantiated directly. Instead, obtain an * instance with * * webdriver.switchTo() * * @see WebDriver#switchTo() */ class TargetLocator { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver; } /** * Schedules a command retrieve the {@code document.activeElement} element on * the current document, or {@code document.body} if activeElement is not * available. * @return {!WebElementPromise} The active element. */ activeElement() { var id = this.driver_.schedule( new command.Command(command.Name.GET_ACTIVE_ELEMENT), 'WebDriver.switchTo().activeElement()'); return new WebElementPromise(this.driver_, id); } /** * Schedules a command to switch focus of all future commands to the topmost * frame on the page. 
* @return {!promise.Promise<void>} A promise that will be resolved * when the driver has changed focus to the default content. */ defaultContent() { return this.driver_.schedule( new command.Command(command.Name.SWITCH_TO_FRAME). setParameter('id', null), 'WebDriver.switchTo().defaultContent()'); } /** * Schedules a command to switch the focus of all future commands to another * frame on the page. The target frame may be specified as one of the * following: * * - A number that specifies a (zero-based) index into [window.frames]( * https://developer.mozilla.org/en-US/docs/Web/API/Window.frames). * - A {@link WebElement} reference, which correspond to a `frame` or `iframe` * DOM element. * - The `null` value, to select the topmost frame on the page. Passing `null` * is the same as calling {@link #defaultContent defaultContent()}. * * If the specified frame can not be found, the returned promise will be * rejected with a {@linkplain error.NoSuchFrameError}. * * @param {(number|WebElement|null)} id The frame locator. * @return {!promise.Promise<void>} A promise that will be resolved * when the driver has changed focus to the specified frame. */ frame(id) { return this.driver_.schedule( new command.Command(command.Name.SWITCH_TO_FRAME). setParameter('id', id), 'WebDriver.switchTo().frame(' + id + ')'); } /** * Schedules a command to switch the focus of all future commands to another * window. Windows may be specified by their {@code window.name} attribute or * by its handle (as returned by {@link WebDriver#getWindowHandles}). * * If the specified window cannot be found, the returned promise will be * rejected with a {@linkplain error.NoSuchWindowError}. * * @param {string} nameOrHandle The name or window handle of the window to * switch focus to. * @return {!promise.Promise<void>} A promise that will be resolved * when the driver has changed focus to the specified window. */ window(nameOrHandle) { return this.driver_.schedule( new command.Command(command.Name.SWITCH_TO_WINDOW). setParameter('name', nameOrHandle), 'WebDriver.switchTo().window(' + nameOrHandle + ')'); } /** * Schedules a command to change focus to the active modal dialog, such as * those opened by `window.alert()`, `window.confirm()`, and * `window.prompt()`. The returned promise will be rejected with a * {@linkplain error.NoSuchAlertError} if there are no open alerts. * * @return {!AlertPromise} The open alert. */ alert() { var text = this.driver_.schedule( new command.Command(command.Name.GET_ALERT_TEXT), 'WebDriver.switchTo().alert()'); var driver = this.driver_; return new AlertPromise(driver, text.then(function(text) { return new Alert(driver, text); })); } } ////////////////////////////////////////////////////////////////////////////// // // WebElement // ////////////////////////////////////////////////////////////////////////////// const LEGACY_ELEMENT_ID_KEY = 'ELEMENT'; const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf'; /** * Represents a DOM element. WebElements can be found by searching from the * document root using a {@link WebDriver} instance, or by searching * under another WebElement: * * driver.get('http://www.google.com'); * var searchForm = driver.findElement(By.tagName('form')); * var searchBox = searchForm.findElement(By.name('q')); * searchBox.sendKeys('webdriver'); */ class WebElement { /** * @param {!WebDriver} driver the parent WebDriver instance for this element. * @param {(!IThenable<string>|string)} id The server-assigned opaque ID for * the underlying DOM element. 
*/ constructor(driver, id) { /** @private {!WebDriver} */ this.driver_ = driver; /** @private {!promise.Promise<string>} */ this.id_ = promise.fulfilled(id); } /** * @param {string} id The raw ID. * @param {boolean=} opt_noLegacy Whether to exclude the legacy element key. * @return {!Object} The element ID for use with WebDriver's wire protocol. */ static buildId(id, opt_noLegacy) { return opt_noLegacy ? {[ELEMENT_ID_KEY]: id} : {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id}; } /** * Extracts the encoded WebElement ID from the object. * * @param {?} obj The object to extract the ID from. * @return {string} the extracted ID. * @throws {TypeError} if the object is not a valid encoded ID. */ static extractId(obj) { if (obj && typeof obj === 'object') { if (typeof obj[ELEMENT_ID_KEY] === 'string') { return obj[ELEMENT_ID_KEY]; } else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') { return obj[LEGACY_ELEMENT_ID_KEY]; } } throw new TypeError('object is not a WebElement ID'); } /** * @param {?} obj the object to test. * @return {boolean} whether the object is a valid encoded WebElement ID. */ static isId(obj) { return obj && typeof obj === 'object' && (typeof obj[ELEMENT_ID_KEY] === 'string' || typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string'); } /** * Compares two WebElements for equality. * * @param {!WebElement} a A WebElement. * @param {!WebElement} b A WebElement. * @return {!promise.Promise<boolean>} A promise that will be * resolved to whether the two WebElements are equal. */ static equals(a, b) { if (a === b) { return promise.fulfilled(true); } let ids = [a.getId(), b.getId()]; return promise.all(ids).then(function(ids) { // If the two element's have the same ID, they should be considered // equal. Otherwise, they may still be equivalent, but we'll need to // ask the server to check for us. if (ids[0] === ids[1]) { return true; } let cmd = new command.Command(command.Name.ELEMENT_EQUALS); cmd.setParameter('id', ids[0]); cmd.setParameter('other', ids[1]); return a.driver_.schedule(cmd, 'WebElement.equals()'); }); } /** @return {!WebDriver} The parent driver for this instance. */ getDriver() { return this.driver_; } /** * @return {!promise.Promise<string>} A promise that resolves to * the server-assigned opaque ID assigned to this element. */ getId() { return this.id_; } /** * @return {!Object} Returns the serialized representation of this WebElement. */ [Symbols.serialize]() { return this.getId().then(WebElement.buildId); } /** * Schedules a command that targets this element with the parent WebDriver * instance. Will ensure this element's ID is included in the command * parameters under the "id" key. * * @param {!command.Command} command The command to schedule. * @param {string} description A description of the command for debugging. * @return {!promise.Promise<T>} A promise that will be resolved * with the command result. * @template T * @see WebDriver#schedule * @private */ schedule_(command, description) { command.setParameter('id', this.getId()); return this.driver_.schedule(command, description); } /** * Schedule a command to find a descendant of this element. If the element * cannot be found, the returned promise will be rejected with a * {@linkplain error.NoSuchElementError NoSuchElementError}. * * The search criteria for an element may be defined using one of the static * factories on the {@link by.By} class, or as a short-hand * {@link ./by.ByHash} object. 
For example, the following two statements * are equivalent: * * var e1 = element.findElement(By.id('foo')); * var e2 = element.findElement({id:'foo'}); * * You may also provide a custom locator function, which takes as input this * instance and returns a {@link WebElement}, or a promise that will resolve * to a WebElement. If the returned promise resolves to an array of * WebElements, WebDriver will use the first element. For example, to find the * first visible link on a page, you could write: * * var link = element.findElement(firstVisibleLink); * * function firstVisibleLink(element) { * var links = element.findElements(By.tagName('a')); * return promise.filter(links, function(link) { * return link.isDisplayed(); * }); * } * * @param {!(by.By|Function)} locator The locator strategy to use when * searching for the element. * @return {!WebElementPromise} A WebElement that can be used to issue * commands against the located element. If the element is not found, the * element will be invalidated and all scheduled commands aborted. */ findElement(locator) { locator = by.checkedLocator(locator); let id; if (typeof locator === 'function') { id = this.driver_.findElementInternal_(locator, this); } else { let cmd = new command.Command( command.Name.FIND_CHILD_ELEMENT). setParameter('using', locator.using). setParameter('value', locator.value); id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')'); } return new WebElementPromise(this.driver_, id); } /** * Schedules a command to find all of the descendants of this element that * match the given search criteria. * * @param {!(by.By|Function)} locator The locator strategy to use when * searching for the element. * @return {!promise.Promise<!Array<!WebElement>>} A * promise that will resolve to an array of WebElements. */ findElements(locator) { locator = by.checkedLocator(locator); let id; if (typeof locator === 'function') { return this.driver_.findElementsInternal_(locator, this); } else { var cmd = new command.Command( command.Name.FIND_CHILD_ELEMENTS). setParameter('using', locator.using). setParameter('value', locator.value); return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')'); } } /** * Schedules a command to click on this element. * @return {!promise.Promise<void>} A promise that will be resolved * when the click command has completed. */ click() { return this.schedule_( new command.Command(command.Name.CLICK_ELEMENT), 'WebElement.click()'); } /** * Schedules a command to type a sequence on the DOM element represented by * this instance. * * Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is * processed in the keysequence, that key state is toggled until one of the * following occurs: * * - The modifier key is encountered again in the sequence. At this point the * state of the key is toggled (along with the appropriate keyup/down * events). * - The {@link input.Key.NULL} key is encountered in the sequence. When * this key is encountered, all modifier keys current in the down state are * released (with accompanying keyup events). The NULL key can be used to * simulate common keyboard shortcuts: * * element.sendKeys("text was", * Key.CONTROL, "a", Key.NULL, * "now text is"); * // Alternatively: * element.sendKeys("text was", * Key.chord(Key.CONTROL, "a"), * "now text is"); * * - The end of the keysequence is encountered. When there are no more keys * to type, all depressed modifier keys are released (with accompanying * keyup events). 
* * If this element is a file input ({@code <input type="file">}), the * specified key sequence should specify the path to the file to attach to * the element. This is analgous to the user clicking "Browse..." and entering * the path into the file select dialog. * * var form = driver.findElement(By.css('form')); * var element = form.findElement(By.css('input[type=file]')); * element.sendKeys('/path/to/file.txt'); * form.submit(); * * For uploads to function correctly, the entered path must reference a file * on the _browser's_ machine, not the local machine running this script. When * running against a remote Selenium server, a {@link input.FileDetector} * may be used to transparently copy files to the remote machine before * attempting to upload them in the browser. * * __Note:__ On browsers where native keyboard events are not supported * (e.g. Firefox on OS X), key events will be synthesized. Special * punctionation keys will be synthesized according to a standard QWERTY en-us * keyboard layout. * * @param {...(number|string|!IThenable<(number|string)>)} var_args The * sequence of keys to type. Number keys may be referenced numerically or * by string (1 or '1'). All arguments will be joined into a single * sequence. * @return {!promise.Promise<void>} A promise that will be resolved * when all keys have been typed. */ sendKeys(var_args) { let keys = Promise.all(Array.prototype.slice.call(arguments, 0)). then(keys => { let ret = []; keys.forEach(key => { let type = typeof key; if (type === 'number') { key = String(key); } else if (type !== 'string') { throw TypeError( 'each key must be a number of string; got ' + type); } // The W3C protocol requires keys to be specified as an array where // each element is a single key. ret.push.apply(ret, key.split('')); }); return ret; }); if (!this.driver_.fileDetector_) { return this.schedule_( new command.Command(command.Name.SEND_KEYS_TO_ELEMENT). setParameter('value', keys), 'WebElement.sendKeys()'); } // Suppress unhandled rejection errors until the flow executes the command. keys.catch(function() {}); var element = this; return this.driver_.flow_.execute(function() { return keys.then(function(keys) { return element.driver_.fileDetector_ .handleFile(element.driver_, keys.join('')); }).then(function(keys) { return element.schedule_( new command.Command(command.Name.SEND_KEYS_TO_ELEMENT). setParameter('value', keys.split('')), 'WebElement.sendKeys()'); }); }, 'WebElement.sendKeys()'); } /** * Schedules a command to query for the tag/node name of this element. * @return {!promise.Promise<string>} A promise that will be * resolved with the element's tag name. */ getTagName() { return this.schedule_( new command.Command(command.Name.GET_ELEMENT_TAG_NAME), 'WebElement.getTagName()'); } /** * Schedules a command to query for the computed style of the element * represented by this instance. If the element inherits the named style from * its parent, the parent will be queried for its value. Where possible, color * values will be converted to their hex representation (e.g. #00ff00 instead * of rgb(0, 255, 0)). * * _Warning:_ the value returned will be as the browser interprets it, so * it may be tricky to form a proper assertion. * * @param {string} cssStyleProperty The name of the CSS style property to look * up. * @return {!promise.Promise<string>} A promise that will be * resolved with the requested CSS value. */ getCssValue(cssStyleProperty) { var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY; return this.schedule_( new command.Command(name). 
setParameter('propertyName', cssStyleProperty), 'WebElement.getCssValue(' + cssStyleProperty + ')'); } /** * Schedules a command to query for the value of the given attribute of the * element. Will return the current value, even if it has been modified after * the page has been loaded. More exactly, this method will return the value * of the given attribute, unless that attribute is not present, in which case * the value of the property with the same name is returned. If neither value * is set, null is returned (for example, the "value" property of a textarea * element). The "style" attribute is converted as best can be to a * text representation with a trailing semi-colon. The following are deemed to * be "boolean" attributes and will return either "true" or null: * * async, autofocus, autoplay, checked, compact, complete, controls, declare, * defaultchecked, defaultselected, defer, disabled, draggable, ended, * formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope, * loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open, * paused, pubdate, readonly, required, reversed, scoped, seamless, seeking, * selected, spellcheck, truespeed, willvalidate * * Finally, the following commonly mis-capitalized attribute/property names * are evaluated as expected: * * - "class" * - "readonly" * * @param {string} attributeName The name of the attribute to query. * @return {!promise.Promise<?string>} A promise that will be * resolved with the attribute's value. The returned value will always be * either a string or null. */ getAttribute(attributeName) { return this.schedule_( new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE). setParameter('name', attributeName), 'WebElement.getAttribute(' + attributeName + ')'); } /** * Get the visible (i.e. not hidden by CSS) innerText of this element, * including sub-elements, without any leading or trailing whitespace. * * @return {!promise.Promise<string>} A promise that will be * resolved with the element's visible text. */ getText() { return this.schedule_( new command.Command(command.Name.GET_ELEMENT_TEXT), 'WebElement.getText()'); } /** * Schedules a command to compute the size of this element's bounding box, in * pixels. * @return {!promise.Promise.<{width: number, height: number}>} A * promise that will be resolved with the element's size as a * {@code {width:number, height:number}} object. */ getSize() { return this.schedule_( new command.Command(command.Name.GET_ELEMENT_SIZE), 'WebElement.getSize()'); } /** * Schedules a command to compute the location of this element in page space. * @return {!promise.Promise.<{x: number, y: number}>} A promise that * will be resolved to the element's location as a * {@code {x:number, y:number}} object. */ getLocation() { return this.schedule_( new command.Command(command.Name.GET_ELEMENT_LOCATION), 'WebElement.getLocation()'); } /** * Schedules a command to query whether the DOM element represented by this * instance is enabled, as dicted by the {@code disabled} attribute. * @return {!promise.Promise<boolean>} A promise that will be * resolved with whether this element is currently enabled. */ isEnabled() { return this.schedule_( new command.Command(command.Name.IS_ELEMENT_ENABLED), 'WebElement.isEnabled()'); } /** * Schedules a command to query whether this element is selected. * @return {!promise.Promise<boolean>} A promise that will be * resolved with whether this element is currently selected. 
*/ isSelected() { return this.schedule_( new command.Command(command.Name.IS_ELEMENT_SELECTED), 'WebElement.isSelected()'); } /** * Schedules a command to submit the form containing this element (or this * element if it is a FORM element). This command is a no-op if the element is * not contained in a form. * @return {!promise.Promise<void>} A promise that will be resolved * when the form has been submitted. */ submit() { return this.schedule_( new command.Command(command.Name.SUBMIT_ELEMENT), 'WebElement.submit()'); } /** * Schedules a command to clear the `value` of this element. This command has * no effect if the underlying DOM element is neither a text INPUT element * nor a TEXTAREA element. * @return {!promise.Promise<void>} A promise that will be resolved * when the element has been cleared. */ clear() { return this.schedule_( new command.Command(command.Name.CLEAR_ELEMENT), 'WebElement.clear()'); } /** * Schedules a command to test whether this element is currently displayed. * @return {!promise.Promise<boolean>} A promise that will be * resolved with whether this element is currently visible on the page. */ isDisplayed() { return this.schedule_( new command.Command(command.Name.IS_ELEMENT_DISPLAYED), 'WebElement.isDisplayed()'); } /** * Take a screenshot of the visible region encompassed by this element's * bounding rectangle. * * @param {boolean=} opt_scroll Optional argument that indicates whether the * element should be scrolled into view before taking a screenshot. * Defaults to false. * @return {!promise.Promise<string>} A promise that will be * resolved to the screenshot as a base-64 encoded PNG. */ takeScreenshot(opt_scroll) { var scroll = !!opt_scroll; return this.schedule_( new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT) .setParameter('scroll', scroll), 'WebElement.takeScreenshot(' + scroll + ')'); } } /** * WebElementPromise is a promise that will be fulfilled with a WebElement. * This serves as a forward proxy on WebElement, allowing calls to be * scheduled without directly on this instance before the underlying * WebElement has been fulfilled. In other words, the following two statements * are equivalent: * * driver.findElement({id: 'my-button'}).click(); * driver.findElement({id: 'my-button'}).then(function(el) { * return el.click(); * }); * * @implements {promise.Thenable<!WebElement>} * @final */ class WebElementPromise extends WebElement { /** * @param {!WebDriver} driver The parent WebDriver instance for this * element. * @param {!promise.Promise<!WebElement>} el A promise * that will resolve to the promised element. */ constructor(driver, el) { super(driver, 'unused'); /** @override */ this.cancel = el.cancel.bind(el); /** @override */ this.isPending = el.isPending.bind(el); /** @override */ this.then = el.then.bind(el); /** @override */ this.catch = el.catch.bind(el); /** @override */ this.finally = el.finally.bind(el); /** * Defers returning the element ID until the wrapped WebElement has been * resolved. * @override */ this.getId = function() { return el.then(function(el) { return el.getId(); }); }; } } promise.Thenable.addImplementation(WebElementPromise); ////////////////////////////////////////////////////////////////////////////// // // Alert // ////////////////////////////////////////////////////////////////////////////// /** * Represents a modal dialog such as {@code alert}, {@code confirm}, or * {@code prompt}. 
Provides functions to retrieve the message displayed with * the alert, accept or dismiss the alert, and set the response text (in the * case of {@code prompt}). */ class Alert { /** * @param {!WebDriver} driver The driver controlling the browser this alert * is attached to. * @param {string} text The message text displayed with this alert. */ constructor(driver, text) { /** @private {!WebDriver} */ this.driver_ = driver; /** @private {!promise.Promise<string>} */ this.text_ = promise.fulfilled(text); } /** * Retrieves the message text displayed with this alert. For instance, if the * alert were opened with alert("hello"), then this would return "hello". * * @return {!promise.Promise<string>} A promise that will be * resolved to the text displayed with this alert. */ getText() { return this.text_; } /** * Sets the username and password in an alert prompting for credentials (such * as a Basic HTTP Auth prompt). This method will implicitly * {@linkplain #accept() submit} the dialog. * * @param {string} username The username to send. * @param {string} password The password to send. * @return {!promise.Promise<void>} A promise that will be resolved when this * command has completed. */ authenticateAs(username, password) { return this.driver_.schedule( new command.Command(command.Name.SET_ALERT_CREDENTIALS), 'WebDriver.switchTo().alert()' + `.authenticateAs("${username}", "${password}")`); } /** * Accepts this alert. * * @return {!promise.Promise<void>} A promise that will be resolved * when this command has completed. */ accept() { return this.driver_.schedule( new command.Command(command.Name.ACCEPT_ALERT), 'WebDriver.switchTo().alert().accept()'); } /** * Dismisses this alert. * * @return {!promise.Promise<void>} A promise that will be resolved * when this command has completed. */ dismiss() { return this.driver_.schedule( new command.Command(command.Name.DISMISS_ALERT), 'WebDriver.switchTo().alert().dismiss()'); } /** * Sets the response text on this alert. This command will return an error if * the underlying alert does not support response text (e.g. window.alert and * window.confirm). * * @param {string} text The text to set. * @return {!promise.Promise<void>} A promise that will be resolved * when this command has completed. */ sendKeys(text) { return this.driver_.schedule( new command.Command(command.Name.SET_ALERT_TEXT). setParameter('text', text), 'WebDriver.switchTo().alert().sendKeys(' + text + ')'); } } /** * AlertPromise is a promise that will be fulfilled with an Alert. This promise * serves as a forward proxy on an Alert, allowing calls to be scheduled * directly on this instance before the underlying Alert has been fulfilled. In * other words, the following two statements are equivalent: * * driver.switchTo().alert().dismiss(); * driver.switchTo().alert().then(function(alert) { * return alert.dismiss(); * }); * * @implements {promise.Thenable.<!webdriver.Alert>} * @final */ class AlertPromise extends Alert { /** * @param {!WebDriver} driver The driver controlling the browser this * alert is attached to. * @param {!promise.Thenable<!Alert>} alert A thenable * that will be fulfilled with the promised alert. 
*/ constructor(driver, alert) { super(driver, 'unused'); /** @override */ this.cancel = alert.cancel.bind(alert); /** @override */ this.isPending = alert.isPending.bind(alert); /** @override */ this.then = alert.then.bind(alert); /** @override */ this.catch = alert.catch.bind(alert); /** @override */ this.finally = alert.finally.bind(alert); /** * Defer returning text until the promised alert has been resolved. * @override */ this.getText = function() { return alert.then(function(alert) { return alert.getText(); }); }; /** * Defers action until the alert has been located. * @override */ this.authenticateAs = function(username, password) { return alert.then(function(alert) { return alert.authenticateAs(username, password); }); }; /** * Defers action until the alert has been located. * @override */ this.accept = function() { return alert.then(function(alert) { return alert.accept(); }); }; /** * Defers action until the alert has been located. * @override */ this.dismiss = function() { return alert.then(function(alert) { return alert.dismiss(); }); }; /** * Defers action until the alert has been located. * @override */ this.sendKeys = function(text) { return alert.then(function(alert) { return alert.sendKeys(text); }); }; } } promise.Thenable.addImplementation(AlertPromise); // PUBLIC API module.exports = { Alert: Alert, AlertPromise: AlertPromise, Condition: Condition, Logs: Logs, Navigation: Navigation, Options: Options, TargetLocator: TargetLocator, Timeouts: Timeouts, WebDriver: WebDriver, WebElement: WebElement, WebElementCondition: WebElementCondition, WebElementPromise: WebElementPromise, Window: Window };
1
13,587
This should only be sent if the driver is speaking to a W3C conformant remote, so we need an if-condition check like we have in the Python bindings.
SeleniumHQ-selenium
py
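The SeleniumHQ review comment above asks for a guard so the extra field is only sent when the driver talks to a W3C-conformant remote, mirroring the Python bindings. A minimal sketch of that idea, assuming a `w3c` flag on the session and a placeholder parameter name (neither is taken from the actual patch or the real binding internals):

# Hypothetical guard: include the field only for W3C-conformant remotes.
# The `w3c` attribute and the 'newField' key are illustrative assumptions.
def build_params(session, base_params, extra_value):
    params = dict(base_params)
    if getattr(session, 'w3c', False):
        # Only a W3C-conformant remote end understands this field.
        params['newField'] = extra_value
    return params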
@@ -7,7 +7,7 @@ module MentorHelper mail_to( mentor.email, I18n.t( - 'dashboards.show.contact_your_mentor', + 'shared.header.contact_your_mentor', mentor_name: mentor.first_name ) )
1
module MentorHelper def mentor_image(mentor) image_tag gravatar_url(mentor.email, size: '300') end def mentor_contact_link(mentor) mail_to( mentor.email, I18n.t( 'dashboards.show.contact_your_mentor', mentor_name: mentor.first_name ) ) end end
1
12,835
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
@@ -90,6 +90,18 @@ class ConfigValidatorUtilTest(ForsetiTestCase): actual_path = cv_data_converter.generate_ancestry_path(full_name) self.assertEqual(expected_path, actual_path) + def test_generate_ancestors_for_org(self): + full_name = 'organization/1234567890/project/test-project-123/firewall/1234567890123456789/' + expected_ancestors = ['organization/1234567890', 'project/test-project-123'] + actual_ancestors = cv_data_converter.generate_ancestors(full_name) + self.assertListEqual(expected_ancestors, actual_ancestors) + + def test_generate_ancestors_for_folder(self): + full_name = 'folder/folder-1/project/project-2/lien/p123/' + expected_ancestors = ['folder/folder-1', 'project/project-2'] + actual_ancestors = cv_data_converter.generate_ancestors(full_name) + self.assertListEqual(expected_ancestors, actual_ancestors) + def test_cleanup_dict_does_not_replace_false(self): firewall = mock_cai_resources.FIREWALL_DATA cv_data_converter.cleanup_dict(firewall)
1
# Copyright 2020 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the Config Validator utils functionality.""" from google.cloud.forseti.scanner.scanners.config_validator_util \ import cv_data_converter from tests.scanner.scanners.config_validator_util.test_data \ import mock_cai_resources from tests.unittest_utils import ForsetiTestCase from tests.scanner.test_data import fake_data_models class Resource(object): def __init__(self, cai_resource_name, cai_resource_type, full_name, type_name, data): self.cai_resource_name = cai_resource_name self.cai_resource_type = cai_resource_type self.full_name = full_name self.type_name = type_name self.data = data def _mock_gcp_resource_iter(resource_type): """Creates a list of GCP resource mocks retrieved by the scanner.""" resource = Resource( cai_resource_name=resource_type.get('resource').get('cai_resource_name'), cai_resource_type=resource_type.get('resource').get('cai_resource_type'), full_name=resource_type.get('resource').get('full_name'), type_name=resource_type.get('resource').get('type_name'), data=resource_type.get('resource').get('data') ) return resource class ConfigValidatorUtilTest(ForsetiTestCase): """Test for the Config Validator Util.""" def test_convert_data_to_cai_asset(self): """Validate convert_data_to_cai_asset() with test resource.""" expected_resource = _mock_gcp_resource_iter( fake_data_models.EXPECTED_NON_CAI_RESOURCE) resource = ( _mock_gcp_resource_iter(fake_data_models.FAKE_NON_CAI_RESOURCE)) primary_key = fake_data_models.FAKE_NON_CAI_RESOURCE.get('primary_key') resource_type = ( fake_data_models.FAKE_NON_CAI_RESOURCE.get('resource_type')) converted_resource = cv_data_converter.convert_data_to_cai_asset( primary_key, resource, resource_type) self.assertEqual(expected_resource.__dict__, converted_resource.__dict__) def test_convert_data_to_cv_asset(self): """Validate convert_data_to_cv_asset() with test resource.""" resource = _mock_gcp_resource_iter( fake_data_models.EXPECTED_NON_CAI_RESOURCE) expected_name = "//cloudresourcemanager.googleapis.com/Lien/lien/p123" expected_asset_type = "cloudresourcemanager.googleapis.com/Lien" converted_resource = cv_data_converter.convert_data_to_cv_asset( resource, fake_data_models.EXPECTED_NON_CAI_RESOURCE["data_type"]) self.assertEqual(expected_name, converted_resource.name, ) self.assertEqual(expected_asset_type, converted_resource.asset_type) def test_generate_ancestry_path_for_org(self): full_name = 'organization/1234567890/project/test-project-123/firewall/1234567890123456789/' expected_path = 'organization/1234567890/project/test-project-123/' actual_path = cv_data_converter.generate_ancestry_path(full_name) self.assertEqual(expected_path, actual_path) def test_generate_ancestry_path_for_folder(self): full_name = 'folder/folder-1/project/project-2/lien/p123/' expected_path = 'folder/folder-1/project/project-2/' actual_path = cv_data_converter.generate_ancestry_path(full_name) self.assertEqual(expected_path, actual_path) def 
test_cleanup_dict_does_not_replace_false(self): firewall = mock_cai_resources.FIREWALL_DATA cv_data_converter.cleanup_dict(firewall) self.assertIsNotNone(firewall['disabled']) def test_cleanup_replaces_empty_values(self): bucket = mock_cai_resources.BUCKET_DATA cv_data_converter.cleanup_dict(bucket) self.assertIsNone(bucket['acl'])
1
35,832
nit: folders are always a numeric ID, and projects, as given by CAI, use the project number for ancestry (see the sketch after this record)
forseti-security-forseti-security
py
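To make the reviewer's nit above concrete: GCP folder segments carry numeric IDs, and CAI ancestry identifies projects by project number rather than by project ID. A sketch of how one of the added tests might read with such fixtures; the specific numbers are invented for illustration, while generate_ancestors and the unittest assertion come from the patch itself:

    def test_generate_ancestors_for_folder_numeric_ids(self):
        # Hypothetical numeric folder ID and project number (made-up values).
        full_name = 'folder/188906894377/project/1098765430017/lien/p123/'
        expected_ancestors = ['folder/188906894377', 'project/1098765430017']
        actual_ancestors = cv_data_converter.generate_ancestors(full_name)
        self.assertListEqual(expected_ancestors, actual_ancestors)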
@@ -35,7 +35,12 @@ import ( "github.com/multiformats/go-multistream" ) -var _ p2p.Service = (*Service)(nil) +var ( + _ p2p.Service = (*Service)(nil) + + // ErrBadNetwork indicates that it is suspected that network is currently in bad condition + ErrBadNetwork = errors.New("bad network") +) type Service struct { ctx context.Context
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package libp2p import ( "context" "crypto/ecdsa" "errors" "fmt" "net" "github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/breaker" handshake "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tracing" "github.com/libp2p/go-libp2p" autonat "github.com/libp2p/go-libp2p-autonat-svc" crypto "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" libp2ppeer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" protocol "github.com/libp2p/go-libp2p-core/protocol" "github.com/libp2p/go-libp2p-peerstore/pstoremem" libp2pquic "github.com/libp2p/go-libp2p-quic-transport" "github.com/libp2p/go-tcp-transport" ws "github.com/libp2p/go-ws-transport" ma "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multistream" ) var _ p2p.Service = (*Service)(nil) type Service struct { ctx context.Context host host.Host libp2pPeerstore peerstore.Peerstore metrics metrics networkID int32 handshakeService *handshake.Service addrssbook addressbook.Putter peers *peerRegistry peerHandler func(context.Context, swarm.Address) error conectionBreaker breaker.Interface logger logging.Logger tracer *tracing.Tracer } type Options struct { PrivateKey *ecdsa.PrivateKey Overlay swarm.Address Addr string DisableWS bool DisableQUIC bool NetworkID int32 Addressbook addressbook.Putter Logger logging.Logger Tracer *tracing.Tracer } func New(ctx context.Context, o Options) (*Service, error) { host, port, err := net.SplitHostPort(o.Addr) if err != nil { return nil, fmt.Errorf("address: %w", err) } ip4Addr := "0.0.0.0" ip6Addr := "::1" if host != "" { ip := net.ParseIP(host) if ip4 := ip.To4(); ip4 != nil { ip4Addr = ip4.String() ip6Addr = "" } else if ip6 := ip.To16(); ip6 != nil { ip6Addr = ip6.String() ip4Addr = "" } } var listenAddrs []string if ip4Addr != "" { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port)) if !o.DisableWS { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port)) } if !o.DisableQUIC { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/udp/%s/quic", ip4Addr, port)) } } if ip6Addr != "" { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port)) if !o.DisableWS { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port)) } if !o.DisableQUIC { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/udp/%s/quic", ip6Addr, port)) } } security := libp2p.DefaultSecurity libp2pPeerstore := pstoremem.NewPeerstore() opts := []libp2p.Option{ libp2p.ListenAddrStrings(listenAddrs...), security, // Attempt to open ports using uPNP for NATed hosts. 
libp2p.NATPortMap(), // Use dedicated peerstore instead the global DefaultPeerstore libp2p.Peerstore(libp2pPeerstore), } if o.PrivateKey != nil { opts = append(opts, libp2p.Identity((*crypto.Secp256k1PrivateKey)(o.PrivateKey)), ) } transports := []libp2p.Option{ libp2p.Transport(tcp.NewTCPTransport), } if !o.DisableWS { transports = append(transports, libp2p.Transport(ws.New)) } if !o.DisableQUIC { transports = append(transports, libp2p.Transport(libp2pquic.NewTransport)) } opts = append(opts, transports...) h, err := libp2p.New(ctx, opts...) if err != nil { return nil, err } // If you want to help other peers to figure out if they are behind // NATs, you can launch the server-side of AutoNAT too (AutoRelay // already runs the client) if _, err = autonat.NewAutoNATService(ctx, h, // Support same non default security and transport options as // original host. append(transports, security)..., ); err != nil { return nil, fmt.Errorf("autonat: %w", err) } peerRegistry := newPeerRegistry() s := &Service{ ctx: ctx, host: h, libp2pPeerstore: libp2pPeerstore, metrics: newMetrics(), networkID: o.NetworkID, handshakeService: handshake.New(o.Overlay, o.NetworkID, o.Logger), peers: peerRegistry, addrssbook: o.Addressbook, logger: o.Logger, tracer: o.Tracer, conectionBreaker: breaker.NewBreaker(breaker.Options{}), // todo: fill non-default options } // Construct protocols. id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)) matcher, err := s.protocolSemverMatcher(id) if err != nil { return nil, fmt.Errorf("protocol version match %s: %w", id, err) } // handshake s.host.SetStreamHandlerMatch(id, matcher, func(stream network.Stream) { peerID := stream.Conn().RemotePeer() i, err := s.handshakeService.Handle(NewStream(stream), peerID) if err != nil { if err == handshake.ErrNetworkIDIncompatible { s.logger.Warningf("peer %s has a different network id.", peerID) } if err == handshake.ErrHandshakeDuplicate { s.logger.Warningf("handshake happened for already connected peer %s", peerID) } s.logger.Debugf("handshake: handle %s: %v", peerID, err) s.logger.Errorf("unable to handshake with peer %v", peerID) _ = s.disconnect(peerID) return } if exists := s.peers.addIfNotExists(stream.Conn(), i.Address); exists { _ = stream.Close() return } _ = stream.Close() remoteMultiaddr, err := ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", stream.Conn().RemoteMultiaddr().String(), peerID.Pretty())) if err != nil { s.logger.Debugf("multiaddr error: handle %s: %v", peerID, err) s.logger.Errorf("unable to connect with peer %v", peerID) _ = s.disconnect(peerID) return } err = s.addrssbook.Put(i.Address, remoteMultiaddr) if err != nil { s.logger.Debugf("handshake: addressbook put error %s: %v", peerID, err) s.logger.Errorf("unable to persist peer %v", peerID) _ = s.disconnect(peerID) return } if s.peerHandler != nil { if err := s.peerHandler(ctx, i.Address); err != nil { s.logger.Debugf("peerhandler error: %s: %v", peerID, err) } } s.metrics.HandledStreamCount.Inc() s.logger.Infof("peer %s connected", i.Address) }) h.Network().SetConnHandler(func(_ network.Conn) { s.metrics.HandledConnectionCount.Inc() }) h.Network().Notify(peerRegistry) // update peer registry on network events h.Network().Notify(s.handshakeService) // update handshake service on network events return s, nil } func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) { for _, ss := range p.StreamSpecs { id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name)) matcher, err := 
s.protocolSemverMatcher(id) if err != nil { return fmt.Errorf("protocol version match %s: %w", id, err) } s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) { peerID := streamlibp2p.Conn().RemotePeer() overlay, found := s.peers.overlay(peerID) if !found { // todo: this should never happen, should we disconnect in this case? // todo: test connection close and refactor _ = s.disconnect(peerID) s.logger.Errorf("overlay address for peer %q not found", peerID) return } stream := newStream(streamlibp2p) // exchange headers if err := handleHeaders(ss.Headler, stream); err != nil { s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers: %v", p.Name, p.Version, ss.Name, overlay, err) return } // tracing: get span tracing context and add it to the context // silently ignore if the peer is not providing tracing ctx, err := s.tracer.WithContextFromHeaders(s.ctx, stream.Headers()) if err != nil && !errors.Is(err, tracing.ErrContextNotFound) { s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: get tracing context: %v", p.Name, p.Version, ss.Name, overlay, err) return } logger := tracing.NewLoggerWithTraceID(ctx, s.logger) logger.Tracef("handle protocol %s/%s: stream %s: peer %s", p.Name, p.Version, ss.Name, overlay) s.metrics.HandledStreamCount.Inc() if err := ss.Handler(ctx, p2p.Peer{Address: overlay}, stream); err != nil { var e *p2p.DisconnectError if errors.Is(err, e) { // todo: test connection close and refactor _ = s.Disconnect(overlay) } logger.Debugf("handle protocol %s/%s: stream %s: peer %s: %v", p.Name, p.Version, ss.Name, overlay, err) return } }) } return nil } func (s *Service) Addresses() (addrs []ma.Multiaddr, err error) { // Build host multiaddress hostAddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", s.host.ID().Pretty())) if err != nil { return nil, err } // Now we can build a full multiaddress to reach this host // by encapsulating both addresses: for _, addr := range s.host.Addrs() { addrs = append(addrs, addr.Encapsulate(hostAddr)) } return addrs, nil } func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (overlay swarm.Address, err error) { // Extract the peer ID from the multiaddr. 
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr) if err != nil { return swarm.Address{}, err } if _, found := s.peers.overlay(info.ID); found { return swarm.Address{}, p2p.ErrAlreadyConnected } if err := s.conectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil { return swarm.Address{}, err } stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName) if err != nil { _ = s.disconnect(info.ID) return swarm.Address{}, err } i, err := s.handshakeService.Handshake(NewStream(stream)) if err != nil { _ = s.disconnect(info.ID) return swarm.Address{}, fmt.Errorf("handshake: %w", err) } if exists := s.peers.addIfNotExists(stream.Conn(), i.Address); exists { if err := helpers.FullClose(stream); err != nil { return swarm.Address{}, err } return i.Address, nil } if err := helpers.FullClose(stream); err != nil { return swarm.Address{}, err } s.metrics.CreatedConnectionCount.Inc() s.logger.Infof("peer %s connected", i.Address) return i.Address, nil } func (s *Service) Disconnect(overlay swarm.Address) error { peerID, found := s.peers.peerID(overlay) if !found { return p2p.ErrPeerNotFound } return s.disconnect(peerID) } func (s *Service) disconnect(peerID libp2ppeer.ID) error { if err := s.host.Network().ClosePeer(peerID); err != nil { return err } s.peers.remove(peerID) return nil } func (s *Service) Peers() []p2p.Peer { return s.peers.peers() } func (s *Service) SetPeerAddedHandler(h func(context.Context, swarm.Address) error) { s.peerHandler = h } func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) { peerID, found := s.peers.peerID(overlay) if !found { return nil, p2p.ErrPeerNotFound } streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName) if err != nil { return nil, err } stream := newStream(streamlibp2p) // tracing: add span context header if headers == nil { headers = make(p2p.Headers) } if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) { return nil, err } // exchange headers if err := sendHeaders(ctx, headers, stream); err != nil { return nil, fmt.Errorf("send headers: %w", err) } return stream, nil } func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) { swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName) st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName)) if err != nil { if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion { return nil, p2p.NewIncompatibleStreamError(err) } return nil, fmt.Errorf("create stream %q to %q: %w", swarmStreamName, peerID, err) } s.metrics.CreatedStreamCount.Inc() return st, nil } func (s *Service) Close() error { if err := s.libp2pPeerstore.Close(); err != nil { return err } return s.host.Close() }
1
9,706
is this used anywhere?
ethersphere-bee
go
@@ -28,7 +28,6 @@ import ( apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" "github.com/openebs/maya/pkg/debug" - merrors "github.com/openebs/maya/pkg/errors/v1alpha1" pkg_errors "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors"
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package replicacontroller import ( "encoding/json" "fmt" "os" "reflect" "strings" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" "github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" "github.com/openebs/maya/pkg/debug" merrors "github.com/openebs/maya/pkg/errors/v1alpha1" pkg_errors "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" "k8s.io/klog" ) const ( v130 = "1.3.0" ) type upgradeParams struct { cvr *apis.CStorVolumeReplica client clientset.Interface } type upgradeFunc func(u *upgradeParams) (*apis.CStorVolumeReplica, error) var ( upgradeMap = map[string]upgradeFunc{ "1.0.0": setReplicaID, "1.1.0": setReplicaID, "1.2.0": setReplicaID, } ) // CVRPatch struct represent the struct used to patch // the cvr object type CVRPatch struct { // Op defines the operation Op string `json:"op"` // Path defines the key path // eg. for // { // "Name": "openebs" // Category: { // "Inclusive": "v1", // "Rank": "A" // } // } // The path of 'Inclusive' would be // "/Name/Category/Inclusive" Path string `json:"path"` Value string `json:"value"` } // syncHandler handles CVR changes based on the provided // operation. It reconciles desired state of CVR with the // actual state. // // Finally, it updates CVR Status func (c *CStorVolumeReplicaController) syncHandler( key string, operation common.QueueOperation, ) error { cvrGot, err := c.getVolumeReplicaResource(key) if err != nil { return err } if cvrGot == nil { return merrors.Errorf( "failed to reconcile cvr {%s}: object not found", key, ) } cvrGot, err = c.populateVersion(cvrGot) if err != nil { klog.Errorf("failed to add versionDetails to cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedPopulate", fmt.Sprintf("Failed to add current version: %s", err.Error()), ) return nil } cvrGot, err = c.reconcileVersion(cvrGot) if err != nil { klog.Errorf("failed to upgrade cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedUpgrade", fmt.Sprintf("Failed to upgrade cvr to %s version: %s", cvrGot.VersionDetails.Desired, err.Error(), ), ) cvrGot.VersionDetails.Status.SetErrorStatus( "Failed to reconcile cvr version", err, ) _, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace).Update(cvrGot) if err != nil { klog.Errorf("failed to update versionDetails status for cvr %s:%s", cvrGot.Name, err.Error()) } return nil } status, err := c.cVREventHandler(operation, cvrGot) if status == "" { // TODO // need to rethink on this logic !! 
// status holds more importance than error return nil } cvrGot.Status.LastUpdateTime = metav1.Now() if cvrGot.Status.Phase != apis.CStorVolumeReplicaPhase(status) { cvrGot.Status.LastTransitionTime = cvrGot.Status.LastUpdateTime // set phase based on received status cvrGot.Status.Phase = apis.CStorVolumeReplicaPhase(status) } // need to update cvr before returning this error if err != nil { if debug.EI.IsCVRUpdateErrorInjected() { return merrors.Errorf("CVR update error via injection") } _, err1 := c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err1 != nil { return merrors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}: {%s}", key, cvrGot.Status.Phase, err1.Error(), ) } return merrors.Wrapf(err, "failed to reconcile cvr {%s}", key) } // Synchronize cstor volume total allocated and // used capacity fields on CVR object. // Any kind of sync activity should be done from here. c.syncCvr(cvrGot) _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err != nil { return merrors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}", key, cvrGot.Status.Phase, ) } klog.V(4).Infof( "cvr {%s} reconciled successfully with current phase being {%s}", key, cvrGot.Status.Phase, ) return nil } func (c *CStorVolumeReplicaController) cVREventHandler( operation common.QueueOperation, cvrObj *apis.CStorVolumeReplica, ) (string, error) { err := volumereplica.CheckValidVolumeReplica(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate), ) return string(apis.CVRStatusOffline), err } // PoolNameHandler tries to get pool name and blocks for // particular number of attempts. 
var noOfAttempts = 2 if !common.PoolNameHandler(cvrObj, noOfAttempts) { return string(cvrObj.Status.Phase), merrors.New("pool not found") } // cvr is created at zfs in the form poolname/volname fullVolName := volumereplica.PoolNameFromCVR(cvrObj) + "/" + cvrObj.Labels["cstorvolume.openebs.io/name"] switch operation { case common.QOpAdd: klog.Infof( "will process add event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) status, err := c.cVRAddEventHandler(cvrObj, fullVolName) return status, err case common.QOpDestroy: klog.Infof( "will process delete event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) err := volumereplica.DeleteVolume(fullVolName) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } err = c.removeFinalizer(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureRemoveFinalizer), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } return "", nil case common.QOpModify: fallthrough case common.QOpSync: klog.V(4).Infof( "will process sync event for cvr {%s} as volume {%s}", cvrObj.Name, operation, ) if isCVRCreateStatus(cvrObj) { return c.cVRAddEventHandler(cvrObj, fullVolName) } return c.getCVRStatus(cvrObj) } klog.Errorf( "failed to handle event for cvr {%s}: operation {%s} not supported", cvrObj.Name, string(operation), ) return string(apis.CVRStatusInvalid), nil } // removeFinalizer removes finalizers present in // CVR resource func (c *CStorVolumeReplicaController) removeFinalizer( cvrObj *apis.CStorVolumeReplica, ) error { cvrPatch := []CVRPatch{ CVRPatch{ Op: "remove", Path: "/metadata/finalizers", }, } cvrPatchBytes, err := json.Marshal(cvrPatch) if err != nil { return merrors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace). Patch(cvrObj.Name, types.JSONPatchType, cvrPatchBytes) if err != nil { return merrors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } klog.Infof("finalizers removed successfully from cvr {%s}", cvrObj.Name) return nil } func (c *CStorVolumeReplicaController) cVRAddEventHandler( cVR *apis.CStorVolumeReplica, fullVolName string, ) (string, error) { var err error // lock is to synchronize pool and volumereplica. Until certain pool related // operations are over, the volumereplica threads will be held. common.SyncResources.Mux.Lock() if common.SyncResources.IsImported { common.SyncResources.Mux.Unlock() // To check if volume is already imported with pool. importedFlag := common.CheckForInitialImportedPoolVol( common.InitialImportedPoolVol, fullVolName, ) if importedFlag && !IsEmptyStatus(cVR) { klog.Infof( "CStorVolumeReplica %v is already imported", string(cVR.ObjectMeta.UID), ) c.recorder.Event( cVR, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported), ) // If the volume already present then get the status of replica from ZFS // and update it with corresponding status phase. If status gives error // then return old phase. return getVolumeReplicaStatus(cVR, fullVolName) } } else { common.SyncResources.Mux.Unlock() } // Below block will be useful when the only cstor-pool-mgmt gets restarted // then it is required to cross-check whether the volume exists or not. 
existingvol, _ := volumereplica.GetVolumes() if common.CheckIfPresent(existingvol, fullVolName) { klog.Warningf( "CStorVolumeReplica %v is already present", string(cVR.GetUID()), ) c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent), ) // After creating zfs datasets in zpool but update to etcd might be // failed if isEmptyReplicaID(cVR) { cVR.Spec.ReplicaID, err = volumereplica.GetReplicaIDFromZFS(fullVolName) if err != nil { // If error happened then update with same as with existing CVR // phase. So, in next reconciliation it will try to update with // proper changes return string(cVR.Status.Phase), pkg_errors.Wrapf(err, "volume replica %s exists", cVR.Name) } } // If the volume already present then get the status of replica from ZFS // and update it with corresponding status return getVolumeReplicaStatus(cVR, fullVolName) } //TODO: Follow best practice while refactor reconciliation logic if isCVRCreateStatus(cVR) { return c.createVolumeReplica(cVR, fullVolName) } return string(apis.CVRStatusOffline), fmt.Errorf( "VolumeReplica offline: %v, %v", cVR.Name, cVR.Labels["cstorvolume.openebs.io/name"], ) } // createVolumeReplica will do following things // 1. If replicaID is empty and if it is new volume generate replicaID. // 2. Trigger ZFS volume dataset create command on success get the status from // ZFS and update it. If `ZFS command` fails then return with same status phase // which is currently holding by CVR. func (c *CStorVolumeReplicaController) createVolumeReplica( cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { // Setting quorum to true for newly creating Volumes. var quorum = true if IsRecreateStatus(cVR) { klog.Infof( "Pool is recreated hence creating the volumes by setting off the quorum property", ) quorum = false } // We should generate replicaID for new volume replicas only if it doesn't has // replica ID. 
if isEmptyReplicaID(cVR) && (IsEmptyStatus(cVR) || IsInitStatus(cVR)) { if err := volumereplica.GenerateReplicaID(cVR); err != nil { klog.Errorf("cVR ReplicaID creation failure: %v", err.Error()) return string(cVR.Status.Phase), err } } if len(cVR.Spec.ReplicaID) == 0 { return string(cVR.Status.Phase), merrors.New("ReplicaID is not set") } err := volumereplica.CreateVolumeReplica(cVR, fullVolName, quorum) if err != nil { klog.Errorf("cVR creation failure: %v", err.Error()) c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.FailureCreate), fmt.Sprintf("failed to create volume replica error: %v", err.Error()), ) return string(cVR.Status.Phase), err } c.recorder.Event( cVR, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated), ) klog.Infof( "cVR creation successful: %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID()), ) return getVolumeReplicaStatus(cVR, fullVolName) } // getVolumeReplicaStatus return the status of replica after executing ZFS // stats command and return previous state and error if any error occured while // getting the status from ZFS func getVolumeReplicaStatus( cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { status, err := volumereplica.Status(fullVolName) if err != nil { return string(cVR.Status.Phase), err } return status, nil } // getVolumeReplicaResource returns object corresponding to the resource key func (c *CStorVolumeReplicaController) getVolumeReplicaResource( key string, ) (*apis.CStorVolumeReplica, error) { // Convert the key(namespace/name) string into a distinct name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil, nil } cStorVolumeReplicaUpdated, err := c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(namespace). Get(name, metav1.GetOptions{}) if err != nil { // The cStorPool resource may no longer exist, in which case we stop // processing. if errors.IsNotFound(err) { runtime.HandleError( fmt.Errorf( "cStorVolumeReplicaUpdated '%s' in work queue no longer exists", key, ), ) return nil, nil } return nil, err } return cStorVolumeReplicaUpdated, nil } // IsRightCStorVolumeReplica is to check if the cvr // request is for particular pod/application. func IsRightCStorVolumeReplica(cVR *apis.CStorVolumeReplica) bool { if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])) != "" { return os.Getenv(string(common.OpenEBSIOCStorID)) == string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"]) } if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])) != "" { return os.Getenv(string(common.OpenEBSIOCSPIID)) == string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"]) } return false } // IsDestroyEvent is to check if the call is for CStorVolumeReplica destroy. func IsDestroyEvent(cVR *apis.CStorVolumeReplica) bool { if cVR.ObjectMeta.DeletionTimestamp != nil { return true } return false } // IsOnlyStatusChange is to check only status change of cStorVolumeReplica object. 
func IsOnlyStatusChange(oldCVR, newCVR *apis.CStorVolumeReplica) bool { if reflect.DeepEqual(oldCVR.Spec, newCVR.Spec) && !reflect.DeepEqual(oldCVR.Status, newCVR.Status) { return true } return false } // IsDeletionFailedBefore flags if status of // cvr is CVRStatusDeletionFailed func IsDeletionFailedBefore(cvrObj *apis.CStorVolumeReplica) bool { return cvrObj.Status.Phase == apis.CVRStatusDeletionFailed } // IsOnlineStatus is to check if the status of cStorVolumeReplica object is // Healthy. func IsOnlineStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusOnline) { klog.Infof("cVR Healthy status: %v", string(cVR.ObjectMeta.UID)) return true } klog.Infof( "cVR '%s': uid '%s': phase '%s': is_healthy_status: false", string(cVR.ObjectMeta.Name), string(cVR.ObjectMeta.UID), cVR.Status.Phase, ) return false } // IsEmptyStatus is to check if the status of cStorVolumeReplica object is empty. func IsEmptyStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusEmpty) { klog.Infof("cVR empty status: %v", string(cVR.ObjectMeta.UID)) return true } klog.Infof( "cVR '%s': uid '%s': phase '%s': is_empty_status: false", string(cVR.ObjectMeta.Name), string(cVR.ObjectMeta.UID), cVR.Status.Phase, ) return false } // IsInitStatus is to check if the status of cStorVolumeReplica object is pending. func IsInitStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusInit) { klog.Infof("cVR pending: %v", string(cVR.ObjectMeta.UID)) return true } klog.V(4).Infof("Not pending status: %v", string(cVR.ObjectMeta.UID)) return false } // IsRecreateStatus is to check if the status of cStorVolumeReplica object is // in recreated state. func IsRecreateStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusRecreate) { klog.Infof("cVR Recreate: %v", string(cVR.ObjectMeta.UID)) return true } klog.V(4).Infof("Not Recreate status: %v", string(cVR.ObjectMeta.UID)) return false } // isCVRCreateStatus returns true if volume replica needs to be created else // return false func isCVRCreateStatus(cVR *apis.CStorVolumeReplica) bool { cVRStatus := string(cVR.Status.Phase) if strings.EqualFold(cVRStatus, string(apis.CVRStatusEmpty)) || strings.EqualFold(cVRStatus, string(apis.CVRStatusRecreate)) || strings.EqualFold(cVRStatus, string(apis.CVRStatusInit)) { return true } return false } func isEmptyReplicaID(cVR *apis.CStorVolumeReplica) bool { return cVR.Spec.ReplicaID == "" } // getCVRStatus is a wrapper that fetches the status of cstor volume. func (c *CStorVolumeReplicaController) getCVRStatus( cVR *apis.CStorVolumeReplica, ) (string, error) { volumeName, err := volumereplica.GetVolumeName(cVR) if err != nil { return "", fmt.Errorf("unable to get volume name:%s", err.Error()) } replicaStatus, err := volumereplica.Status(volumeName) if err != nil { // ToDO : Put error in event recorder c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.FailureStatusSync), string(common.MessageResourceFailStatusSync), ) return "", err } return replicaStatus, nil } // syncCvr updates field on CVR object after fetching the values from zfs utility. func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) { // Get the zfs volume name corresponding to this cvr. 
volumeName, err := volumereplica.GetVolumeName(cvr) if err != nil { klog.Errorf("Unable to sync CVR capacity: %v", err) c.recorder.Event( cvr, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync), ) } // Get capacity of the volume. capacity, err := volumereplica.Capacity(volumeName) if err != nil { klog.Errorf("Unable to sync CVR capacity: %v", err) c.recorder.Event( cvr, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync), ) } else { cvr.Status.Capacity = *capacity } } func (c *CStorVolumeReplicaController) reconcileVersion(cvr *apis.CStorVolumeReplica) ( *apis.CStorVolumeReplica, error, ) { var err error // the below code uses deep copy to have the state of object just before // any update call is done so that on failure the last state object can be returned if cvr.VersionDetails.Status.Current != cvr.VersionDetails.Desired { if !apis.IsCurrentVersionValid(cvr.VersionDetails.Status.Current) { return cvr, pkg_errors.Errorf("invalid current version %s", cvr.VersionDetails.Status.Current) } if !apis.IsDesiredVersionValid(cvr.VersionDetails.Desired) { return cvr, pkg_errors.Errorf("invalid desired version %s", cvr.VersionDetails.Desired) } cvrObj := cvr.DeepCopy() if cvrObj.VersionDetails.Status.State != apis.ReconcileInProgress { cvrObj.VersionDetails.Status.SetInProgressStatus() cvrObj, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } } path := strings.Split(cvrObj.VersionDetails.Status.Current, "-")[0] u := &upgradeParams{ cvr: cvrObj, client: c.clientset, } // Get upgrade function for corresponding path, if path does not // exits then no upgrade is required and funcValue will be nil. funcValue := upgradeMap[path] if funcValue != nil { cvrObj, err = funcValue(u) if err != nil { return cvrObj, err } } cvr = cvrObj.DeepCopy() cvrObj.VersionDetails.SetSuccessStatus() cvrObj, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } return cvrObj, nil } return cvr, nil } // populateVersion assigns VersionDetails for old cvr object func (c *CStorVolumeReplicaController) populateVersion(cvr *apis.CStorVolumeReplica) ( *apis.CStorVolumeReplica, error, ) { v := cvr.Labels[string(apis.OpenEBSVersionKey)] // 1.3.0 onwards new CVR will have the field populated during creation if v < v130 && cvr.VersionDetails.Status.Current == "" { cvrObj := cvr.DeepCopy() cvrObj.VersionDetails.Status.Current = v cvrObj.VersionDetails.Desired = v cvrObj, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cvrObj.Namespace). Update(cvrObj) if err != nil { return cvr, err } klog.Infof("Version %s added on cvr %s", v, cvrObj.Name) return cvrObj, nil } return cvr, nil } // setReplicaID sets the replica_id if not present for old cvrs when // they are upgraded to version 1.3.0 or above. func setReplicaID(u *upgradeParams) (*apis.CStorVolumeReplica, error) { cvr := u.cvr cvrObj := cvr.DeepCopy() err := volumereplica.GetAndUpdateReplicaID(cvrObj) if err != nil { return cvr, err } cvrObj, err = u.client.OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } return cvrObj, nil }
1
17,662
change pkg_errors to errors. Keep it consistent across all the files.
openebs-maya
go
@@ -657,7 +657,8 @@ class LocalRemote(unittest.TestCase): resolved_results, _, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--resolved', 'json', - ["--url", self._url]) + ["--url", self._url, + "--review-status", "unreviewed", "confirmed", "false_positive"]) print(resolved_results) self.assertTrue(resolved_results)
1
# # ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- """diff_local_remote function test. Tests for the diff feature when comparing a local report directory with a remote run in the database. """ import json import os import re import shutil import subprocess import unittest from libtest import env from libtest.codechecker import create_baseline_file, get_diff_results class LocalRemote(unittest.TestCase): def setUp(self): # TEST_WORKSPACE is automatically set by test package __init__.py . test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ print('Running ' + test_class + ' tests in ' + test_workspace) # Get the test configuration from the prepared int the test workspace. self._test_cfg = env.import_test_cfg(test_workspace) # Get the test project configuration from the prepared test workspace. self._testproject_data = env.setup_test_proj_cfg(test_workspace) self.assertIsNotNone(self._testproject_data) # Setup a viewer client to test viewer API calls. self._cc_client = env.setup_viewer_client(test_workspace) self.assertIsNotNone(self._cc_client) # Get the CodeChecker cmd if needed for the tests. self._codechecker_cmd = env.codechecker_cmd() # Get the run names which belong to this test. self._run_names = env.get_run_names(test_workspace) self._local_test_project = \ self._test_cfg['test_project']['project_path_local'] self._remote_test_project = \ self._test_cfg['test_project']['project_path_remote'] self._local_reports = os.path.join(self._local_test_project, 'reports') self._remote_reports = os.path.join(self._remote_test_project, 'reports') self._url = env.parts_to_url(self._test_cfg['codechecker_cfg']) self._env = self._test_cfg['codechecker_cfg']['check_env'] def get_local_remote_diff(self, extra_args=None, format_type=None): """Return the unresolved results comparing local to a remote. Returns the text output of the diff command comparing the local reports to a remote run in the database. extra_args: can be used to add list of additional arguments to the diff command. Like filter arguments or to change output format. """ if not extra_args: extra_args = [] return get_diff_results([self._local_reports], [self._run_names[0]], '--unresolved', format_type, ['--url', self._url, *extra_args])[0] def test_local_to_remote_compare_count_new(self): """Count the new results with no filter in local compare mode.""" out, _, _ = get_diff_results([self._local_reports], [self._run_names[0]], '--new', None, ["--url", self._url]) count = len(re.findall(r'\[core\.NullDereference\]', out)) self.assertEqual(count, 4) def test_remote_to_local_compare_count_new(self): """Count the new results with no filter.""" out, _, _ = get_diff_results([self._run_names[0]], [self._local_reports], '--new', None, ["--url", self._url]) # 5 new core.CallAndMessage issues. # 1 is suppressed in code count = len(re.findall(r'\[core\.CallAndMessage\]', out)) self.assertEqual(count, 5) # core.NullDereference was disabled in the remote analysis # so no results are new comapared to the local analysis. 
count = len(re.findall(r'\[core\.NullDereference\]', out)) self.assertEqual(count, 0) def test_local_compare_count_unres(self): """Count the unresolved results with no filter.""" out, _, _ = get_diff_results( [self._local_reports], [self._run_names[0]], '--unresolved', None, ["--url", self._url]) print(out) count = len(re.findall(r'\[core\.CallAndMessage\]', out)) self.assertEqual(count, 0) count = len(re.findall(r'\[core\.DivideZero\]', out)) self.assertEqual(count, 10) count = len(re.findall(r'\[deadcode\.DeadStores\]', out)) self.assertEqual(count, 6) count = len(re.findall(r'\[cplusplus\.NewDelete\]', out)) self.assertEqual(count, 5) count = len(re.findall(r'\[unix\.Malloc\]', out)) self.assertEqual(count, 1) def test_local_compare_count_unres_rgx(self): """Count the unresolved results with no filter and run name regex.""" out, _, _ = get_diff_results( [self._local_reports], [self._run_names[0]], '--unresolved', None, ["--url", self._url]) print(out) count = len(re.findall(r'\[core\.CallAndMessage\]', out)) self.assertEqual(count, 0) count = len(re.findall(r'\[core\.DivideZero\]', out)) self.assertEqual(count, 10) count = len(re.findall(r'\[deadcode\.DeadStores\]', out)) self.assertEqual(count, 6) count = len(re.findall(r'\[cplusplus\.NewDelete\]', out)) self.assertEqual(count, 5) count = len(re.findall(r'\[unix\.Malloc\]', out)) self.assertEqual(count, 1) def test_local_cmp_filter_unres_severity(self): """Filter unresolved results by severity levels.""" res = self.get_local_remote_diff(['--severity', 'low']) self.assertEqual(len(re.findall(r'\[LOW\]', res)), 6) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 0) res = self.get_local_remote_diff(['--severity', 'high']) self.assertEqual(len(re.findall(r'\[LOW\]', res)), 0) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 18) res = self.get_local_remote_diff(['--severity', 'high', 'low']) self.assertEqual(len(re.findall(r'\[LOW\]', res)), 6) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 18) res = self.get_local_remote_diff() self.assertEqual(len(re.findall(r'\[LOW\]', res)), 6) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 18) def test_local_cmp_filter_unres_filepath(self): """Filter unresolved results by file path.""" res = self.get_local_remote_diff(['--file', '*divide_zero.cpp']) # Only 4 bugs can be found in the following file but in the # output the file names are printed again because of the summary. self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 6) self.assertEqual(len(re.findall(r'new_delete.cpp', res)), 0) res = self.get_local_remote_diff(['--file', 'divide_zero.cpp', # Exact match. '*new_delete.cpp']) self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 0) # Only 6 bugs can be found in the following file but in the # output the file names are printed again because of the summary. self.assertEqual(len(re.findall(r'new_delete.cpp', res)), 8) def test_local_cmp_filter_unres_checker_name(self): """Filter by checker name.""" res = self.get_local_remote_diff(['--checker-name', 'core.NullDereference']) self.assertEqual(len(re.findall(r'core.NullDereference', res)), 0) res = self.get_local_remote_diff(['--checker-name', 'core.*']) self.assertEqual(len(re.findall(r'core.*', res)), 15) # Filter by checker message (case insensitive). 
res = self.get_local_remote_diff(['--checker-msg', 'division by*']) self.assertEqual(len(re.findall(r'Division by.*', res)), 10) def test_local_cmp_filter_unres_filter_mix(self): """Filter by multiple filters file and severity.""" res = self.get_local_remote_diff(['--file', '*divide_zero.cpp', '--severity', 'high']) # Only 2 bugs can be found in the following file but in the # output the file names are printed again because of the summary. self.assertEqual(len(re.findall(r'divide_zero.cpp', res)), 4) self.assertEqual(len(re.findall(r'\[HIGH\]', res)), 2) def test_local_cmp_filter_unres_filter_mix_json(self): """Filter by multiple filters file and severity with json output.""" # TODO check if only high severity reports are retuned. reports = self.get_local_remote_diff(['--file', '*divide_zero.cpp', '--severity', 'high'], 'json') for report in reports: self.assertTrue("divide_zero.cpp" in report['file']['path'], "Report filename is different from the expected.") def test_local_compare_res_html_output_unresolved(self): """Check that html files will be generated by using diff command.""" html_reports = os.path.join(self._local_reports, "html_reports") get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', 'html', ["--url", self._url, '-e', html_reports, "--verbose", "debug"]) checked_files = set() for res in self.get_local_remote_diff(None, 'json'): checked_files.add(os.path.basename(res['file']['path'])) # Check if index.html file was generated. html_index = os.path.join(html_reports, "index.html") self.assertTrue(os.path.exists(html_index)) html_statistics = os.path.join(html_reports, "statistics.html") self.assertTrue(os.path.exists(html_statistics)) # Check that html files were generated for each reports. for html_file_names in os.listdir(html_reports): suffix = html_file_names.rfind("_") file_name = html_file_names[:suffix] \ if suffix != -1 else html_file_names if file_name in ["index.html", "statistics.html"]: continue self.assertIn(file_name, checked_files) def test_different_basename_types(self): """ Test different basename types. Test that diff command will fail when remote run and local report directory are given to the basename parameter. """ base_run_name = self._run_names[0] diff_cmd = [self._codechecker_cmd, "cmd", "diff", "-b", base_run_name, self._local_reports, "-n", self._local_reports, "--unresolved", "--url", self._url] with self.assertRaises(subprocess.CalledProcessError): subprocess.check_output( diff_cmd, env=self._env, cwd=os.environ['TEST_WORKSPACE'], encoding="utf-8", errors="ignore") def test_different_newname_types(self): """ Test different newname types. Test that diff command will fail when remote run and local report directory are given to the newname parameter. """ base_run_name = self._run_names[0] diff_cmd = [self._codechecker_cmd, "cmd", "diff", "-b", base_run_name, "-n", self._local_reports, base_run_name, "--unresolved", "--url", self._url] with self.assertRaises(subprocess.CalledProcessError): subprocess.check_output( diff_cmd, env=self._env, cwd=os.environ['TEST_WORKSPACE'], encoding="utf-8", errors="ignore") def test_diff_gerrit_output(self): """Test gerrit output. Every report should be in the gerrit review json. 
""" export_dir = os.path.join(self._local_reports, "export_dir1") env = self._env.copy() env["CC_REPO_DIR"] = '' env["CC_CHANGED_FILES"] = '' get_diff_results( [self._run_names[0]], [self._local_reports], '--new', 'gerrit', ["--url", self._url, "-e", export_dir], env) gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json') self.assertTrue(os.path.exists(gerrit_review_file)) with open(gerrit_review_file, 'r', encoding="utf-8", errors="ignore") as rw_file: review_data = json.load(rw_file) lbls = review_data["labels"] self.assertEqual(lbls["Verified"], -1) self.assertEqual(lbls["Code-Review"], -1) self.assertEqual(review_data["message"], "CodeChecker found 5 issue(s) in the code.") self.assertEqual(review_data["tag"], "jenkins") comments = review_data["comments"] self.assertEqual(len(comments), 1) file_path = next(iter(comments)) reports = comments[file_path] self.assertEqual(len(reports), 5) for report in reports: self.assertIn("message", report) self.assertIn("range", report) range = report["range"] self.assertIn("start_line", range) self.assertIn("start_character", range) self.assertIn("end_line", range) self.assertIn("end_character", range) shutil.rmtree(export_dir, ignore_errors=True) def test_diff_gerrit_stdout(self): """Test gerrit stdout output. Only one output format was selected the gerrit review json should be printed to stdout. """ env = self._env.copy() env["CC_REPO_DIR"] = '' env["CC_CHANGED_FILES"] = '' review_data, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--new', 'gerrit', ["--url", self._url], env) print(review_data) review_data = json.loads(review_data) lbls = review_data["labels"] self.assertEqual(lbls["Verified"], -1) self.assertEqual(lbls["Code-Review"], -1) self.assertEqual(review_data["message"], "CodeChecker found 5 issue(s) in the code.") self.assertEqual(review_data["tag"], "jenkins") comments = review_data["comments"] self.assertEqual(len(comments), 1) file_path = next(iter(comments)) reports = comments[file_path] self.assertEqual(len(reports), 5) for report in reports: self.assertIn("message", report) self.assertIn("range", report) range = report["range"] self.assertIn("start_line", range) self.assertIn("start_character", range) self.assertIn("end_line", range) self.assertIn("end_character", range) def test_set_env_diff_gerrit_output(self): """Test gerrit output when using diff and set env vars. Only the reports which belong to the changed files should be in the gerrit review json. """ export_dir = os.path.join(self._local_reports, "export_dir2") env = self._env.copy() env["CC_REPO_DIR"] = self._local_test_project report_url = "localhost:8080/index.html" env["CC_REPORT_URL"] = report_url changed_file_path = os.path.join(self._local_reports, 'files_changed') with open(changed_file_path, 'w', encoding="utf-8", errors="ignore") as changed_file: # Print some garbage value to the file. 
changed_file.write(")]}'\n") changed_files = { "/COMMIT_MSG": {}, "divide_zero.cpp": {}} changed_file.write(json.dumps(changed_files)) env["CC_CHANGED_FILES"] = changed_file_path _, err, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--unresolved', 'gerrit', ["--url", self._url, "-e", export_dir]) self.assertIn("'CC_REPO_DIR'", err) self.assertIn("'CC_CHANGED_FILES'", err) self.assertIn("needs to be set", err) get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', 'gerrit', ["--url", self._url, "-e", export_dir], env) gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json') self.assertTrue(os.path.exists(gerrit_review_file)) with open(gerrit_review_file, 'r', encoding="utf-8", errors="ignore") as rw_file: review_data = json.load(rw_file) lbls = review_data["labels"] self.assertEqual(lbls["Verified"], -1) self.assertEqual(lbls["Code-Review"], -1) self.assertIn( "CodeChecker found 25 issue(s) in the code.", review_data["message"]) self.assertIn(f"See: '{report_url}'", review_data["message"]) self.assertEqual(review_data["tag"], "jenkins") # Because the CC_CHANGED_FILES is set we will see reports only for # the divide_zero.cpp function in the comments section. comments = review_data["comments"] self.assertEqual(len(comments), 1) reports = comments["divide_zero.cpp"] self.assertEqual(len(reports), 4) shutil.rmtree(export_dir, ignore_errors=True) def test_diff_codeclimate_output(self): """ Test codeclimate output when using diff and set env vars. """ export_dir = os.path.join(self._local_reports, "export_dir") env = self._env.copy() env["CC_REPO_DIR"] = self._local_test_project get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', 'codeclimate', ["--url", self._url, "-e", export_dir], env) codeclimate_issues_file = os.path.join(export_dir, 'codeclimate_issues.json') self.assertTrue(os.path.exists(codeclimate_issues_file)) with open(codeclimate_issues_file, 'r', encoding="utf-8", errors="ignore") as rw_file: issues = json.load(rw_file) for issue in issues: self.assertEqual(issue["type"], "issue") self.assertTrue(issue["check_name"]) self.assertEqual(issue["categories"], ["Bug Risk"]) self.assertTrue(issue["fingerprint"]) self.assertTrue(issue["location"]["path"]) self.assertTrue(issue["location"]["lines"]["begin"]) malloc_issues = [i for i in issues if i["check_name"] == "unix.Malloc"] self.assertEqual(malloc_issues, [{ "type": "issue", "check_name": "unix.Malloc", "description": "Memory allocated by alloca() should not be " "deallocated", "categories": [ "Bug Risk" ], "fingerprint": "c2132f78ef0e01bdb5eacf616048625f", "severity": "minor", "location": { "path": "new_delete.cpp", "lines": { "begin": 31 } }}]) shutil.rmtree(export_dir, ignore_errors=True) def test_diff_no_trim_codeclimate_output(self): """ Test codeclimate output when using diff and don't set env vars. 
""" export_dir_path = os.path.join(self._local_reports, "export_dir") get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', "codeclimate", ["-e", export_dir_path, "--url", self._url], self._env) issues_file_path = os.path.join(export_dir_path, 'codeclimate_issues.json') self.assertTrue(os.path.exists(issues_file_path)) with open(issues_file_path, 'r', encoding="utf-8", errors="ignore") as f: issues = json.load(f) malloc_issues = [i for i in issues if i["check_name"] == "unix.Malloc"] self.assertNotEqual(len(malloc_issues), 0) file_path = malloc_issues[0]["location"]["path"] self.assertTrue(os.path.isabs(file_path)) self.assertTrue(file_path.endswith(f"/new_delete.cpp")) shutil.rmtree(export_dir_path, ignore_errors=True) def test_diff_multiple_output(self): """ Test multiple output type for diff command. """ export_dir = os.path.join(self._local_reports, "export_dir3") env = self._env.copy() env["CC_REPO_DIR"] = '' env["CC_CHANGED_FILES"] = '' out, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--resolved', None, ["-o", "html", "gerrit", "plaintext", "-e", export_dir, "--url", self._url], env) print(out) # Check the plaintext output. count = len(re.findall(r'\[core\.NullDereference\]', out)) self.assertEqual(count, 4) # Check that the gerrit output json file was generated. gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json') self.assertTrue(os.path.exists(gerrit_review_file)) # Check that index.html output was generated. index_html = os.path.join(export_dir, 'index.html') self.assertTrue(os.path.exists(index_html)) shutil.rmtree(export_dir, ignore_errors=True) def test_diff_remote_local_resolved_same(self): """ Test for resolved reports on same list remotely and locally. """ out, _, _ = get_diff_results( [self._run_names[0]], [self._remote_reports], '--resolved', 'json', ["--url", self._url]) self.assertEqual(out, []) def test_local_to_remote_with_baseline_file(self): """ Get reports based on a baseline file given to the basename option. """ baseline_file_path = create_baseline_file(self._local_reports) # Get new reports. new_results, _, returncode = get_diff_results( [baseline_file_path], [self._run_names[0]], '--new', 'json', ["--url", self._url]) print(new_results) for report in new_results: self.assertEqual(report['checker_name'], "core.NullDereference") self.assertEqual(returncode, 2) # Get unresolved reports. unresolved_results, err, returncode = get_diff_results( [baseline_file_path], [self._run_names[0]], '--unresolved', 'json', ["--url", self._url]) print(unresolved_results) self.assertTrue(unresolved_results) self.assertFalse(any( r for r in unresolved_results if r['checker_name'] == 'core.CallAndMessage')) self.assertEqual(returncode, 2) # Get resolved reports. resolved_results, err, returncode = get_diff_results( [baseline_file_path], [self._run_names[0]], '--resolved', 'json', ["--url", self._url]) print(resolved_results) self.assertFalse(resolved_results) self.assertEqual(returncode, 2) self.assertIn( "Couldn't get local reports for the following baseline report " "hashes: ", err) def test_remote_to_local_with_baseline_file(self): """ Get reports based on a baseline file given to the newname option. """ baseline_file_path = create_baseline_file(self._local_reports) # Get new reports. 
res, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--new', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) new_hashes = sorted(set([n['report_hash'] for n in res])) new_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--new', 'json', ["--url", self._url]) print(new_results) self.assertFalse(new_results) self.assertEqual(returncode, 2) self.assertIn( "Couldn't get local reports for the following baseline report " "hashes: " + ', '.join(new_hashes), err) # Get unresolved reports. res, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--unresolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) unresolved_hashes = sorted(set([n['report_hash'] for n in res])) unresolved_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--unresolved', 'json', ["--url", self._url]) print(unresolved_results) self.assertFalse(unresolved_results) self.assertEqual(returncode, 2) self.assertIn( "Couldn't get local reports for the following baseline report " "hashes: " + ', '.join(unresolved_hashes), err) # Get resolved reports. res, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--resolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) resolved_hashes = set([n['report_hash'] for n in res]) resolved_results, _, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--resolved', 'json', ["--url", self._url]) print(resolved_results) self.assertTrue(resolved_results) self.assertSetEqual( {r['report_hash'] for r in resolved_results}, resolved_hashes) self.assertEqual(returncode, 2)
1
13,975
I don't see any test cases which would test that fix dates are set properly on review status changes / storage events. For this reason please create some more test cases and also check my scenarios above.
Ericsson-codechecker
c
@@ -109,6 +109,10 @@ namespace Microsoft.Rest.Generator foreach (var method in client.Methods) { NormalizeMethod(method); + if (method.DefaultResponse != null && method.DefaultResponse is CompositeType) + { + client.Exceptions.Add((CompositeType)method.DefaultResponse); + } } }
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Linq; using System.Net; using System.Text; using Microsoft.Rest.Generator.ClientModel; using Microsoft.Rest.Generator.Logging; using Microsoft.Rest.Generator.Properties; using Microsoft.Rest.Generator.Utilities; namespace Microsoft.Rest.Generator { public abstract class CodeNamer { private static readonly IDictionary<char, string> basicLaticCharacters; protected CodeNamer() { ReservedWords = new HashSet<string>(); } /// <summary> /// Gets collection of reserved words. /// </summary> public HashSet<string> ReservedWords { get; private set; } /// <summary> /// Formats segments of a string split by underscores or hyphens into "Camel" case strings. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public static string CamelCase(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return name.Split('_', '-', ' ') .Where(s => !string.IsNullOrEmpty(s)) .Select((s, i) => FormatCase(s, i == 0)) // Pass true/toLower for just the first element. .DefaultIfEmpty("") .Aggregate(string.Concat); } /// <summary> /// Formats segments of a string split by underscores or hyphens into "Pascal" case. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public static string PascalCase(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return name.Split('_', '-', ' ') .Where(s => !string.IsNullOrEmpty(s)) .Select(s => FormatCase(s, false)) .DefaultIfEmpty("") .Aggregate(string.Concat); } /// <summary> /// Recursively normalizes names in the client model /// </summary> /// <param name="client"></param> public virtual void NormalizeClientModel(ServiceClient client) { if (client == null) { throw new ArgumentNullException("client"); } client.Name = GetTypeName(client.Name); client.Namespace = GetNamespaceName(client.Namespace); foreach (var property in client.Properties) { property.Name = GetPropertyName(property.Name); property.Type = NormalizeTypeReference(property.Type); } var normalizedModels = new List<CompositeType>(); foreach (var modelType in client.ModelTypes) { normalizedModels.Add(NormalizeTypeDeclaration(modelType) as CompositeType); } client.ModelTypes.Clear(); normalizedModels.ForEach( (item) => client.ModelTypes.Add(item)); var normalizedEnums = new List<EnumType>(); foreach (var enumType in client.EnumTypes) { var normalizedType = NormalizeTypeDeclaration(enumType) as EnumType; if (normalizedType != null) { normalizedEnums.Add(NormalizeTypeDeclaration(enumType) as EnumType); } } client.EnumTypes.Clear(); normalizedEnums.ForEach((item) => client.EnumTypes.Add(item)); foreach (var method in client.Methods) { NormalizeMethod(method); } } /// <summary> /// Normalizes names in the method /// </summary> /// <param name="method"></param> public virtual void NormalizeMethod(Method method) { if (method == null) { throw new ArgumentNullException("method"); } method.Name = GetMethodName(method.Name); method.Group = GetMethodGroupName(method.Group); method.ReturnType = NormalizeTypeReference(method.ReturnType); method.DefaultResponse = NormalizeTypeReference(method.DefaultResponse); var normalizedResponses = new Dictionary<HttpStatusCode, IType>(); foreach (var statusCode in method.Responses.Keys) { 
normalizedResponses[statusCode] = NormalizeTypeReference(method.Responses[statusCode]); } method.Responses.Clear(); foreach (var statusCode in normalizedResponses.Keys) { method.Responses[statusCode] = normalizedResponses[statusCode]; } foreach (var parameter in method.Parameters) { parameter.Name = GetParameterName(parameter.Name); parameter.Type = NormalizeTypeReference(parameter.Type); } foreach (var parameterTransformation in method.InputParameterTransformation) { parameterTransformation.OutputParameter.Name = GetParameterName(parameterTransformation.OutputParameter.Name); parameterTransformation.OutputParameter.Type = NormalizeTypeReference(parameterTransformation.OutputParameter.Type); foreach (var parameterMapping in parameterTransformation.ParameterMappings) { if (parameterMapping.InputParameterProperty != null) { parameterMapping.InputParameterProperty = string.Join(".", parameterMapping.InputParameterProperty.Split(new[] { '.' }, StringSplitOptions.RemoveEmptyEntries) .Select(p => GetPropertyName(p))); } if (parameterMapping.OutputParameterProperty != null) { parameterMapping.OutputParameterProperty = string.Join(".", parameterMapping.OutputParameterProperty.Split(new[] { '.' }, StringSplitOptions.RemoveEmptyEntries) .Select(p => GetPropertyName(p))); } } } } /// <summary> /// Formats a string for naming members of an enum using Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetEnumMemberName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharacters(name)); } /// <summary> /// Formats a string for naming fields using a prefix '_' and VariableName Camel case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetFieldName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return '_' + GetVariableName(name); } /// <summary> /// Formats a string for naming interfaces using a prefix 'I' and Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetInterfaceName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return "I" + PascalCase(RemoveInvalidCharacters(name)); } /// <summary> /// Formats a string for naming a method using Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetMethodName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Operation"))); } /// <summary> /// Formats a string for identifying a namespace using Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetNamespaceName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharactersNamespace(name)); } /// <summary> /// Formats a string for naming method parameters using GetVariableName Camel case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetParameterName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return GetVariableName(GetEscapedReservedName(name, "Parameter")); } /// <summary> /// Formats a string for naming properties using Pascal case by default. 
/// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetPropertyName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Property"))); } /// <summary> /// Formats a string for naming a Type or Object using Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetTypeName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Model"))); } /// <summary> /// Formats a string for naming a Method Group using Pascal case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetMethodGroupName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return PascalCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Model"))); } /// <summary> /// Formats a string for naming a local variable using Camel case by default. /// </summary> /// <param name="name"></param> /// <returns>The formatted string.</returns> public virtual string GetVariableName(string name) { if (string.IsNullOrWhiteSpace(name)) { return name; } return CamelCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Variable"))); } /// <summary> /// Returns language specific type reference name. /// </summary> /// <param name="type"></param> /// <returns></returns> public abstract IType NormalizeTypeReference(IType type); /// <summary> /// Returns language specific type declaration name. /// </summary> /// <param name="type"></param> /// <returns></returns> public abstract IType NormalizeTypeDeclaration(IType type); /// <summary> /// Formats a string as upper or lower case. Two-letter inputs that are all upper case are both lowered. /// Example: ID = > id, Ex => ex /// </summary> /// <param name="name"></param> /// <param name="toLower"></param> /// <returns>The formatted string.</returns> private static string FormatCase(string name, bool toLower) { if (!string.IsNullOrEmpty(name)) { if (name.Length < 2 || (name.Length == 2 && char.IsUpper(name[0]) && char.IsUpper(name[1]))) { name = toLower ? name.ToLowerInvariant() : name.ToUpperInvariant(); } else { name = (toLower ? char.ToLowerInvariant(name[0]) : char.ToUpperInvariant(name[0])) + name.Substring(1, name.Length - 1); } } return name; } /// <summary> /// Removes invalid characters from the name. Everything but alpha-numeral, underscore, /// and dash. /// </summary> /// <param name="name">String to parse.</param> /// <returns>Name with invalid characters removed.</returns> public static string RemoveInvalidCharacters(string name) { return GetValidName(name, '_', '-'); } /// <summary> /// Removes invalid characters from the namespace. Everything but alpha-numeral, underscore, /// period, and dash. /// </summary> /// <param name="name">String to parse.</param> /// <returns>Namespace with invalid characters removed.</returns> protected virtual string RemoveInvalidCharactersNamespace(string name) { return GetValidName(name, '_', '-', '.'); } /// <summary> /// Gets valid name for the identifier. 
/// </summary> /// <param name="name">String to parse.</param> /// <param name="allowerCharacters">Allowed characters.</param> /// <returns>Name with invalid characters removed.</returns> private static string GetValidName(string name, params char[] allowerCharacters) { var correctName = RemoveInvalidCharacters(name, allowerCharacters); // here we have only letters and digits or an empty string if (string.IsNullOrEmpty(correctName) || basicLaticCharacters.ContainsKey(correctName[0])) { var sb = new StringBuilder(); foreach (char symbol in name) { if (basicLaticCharacters.ContainsKey(symbol)) { sb.Append(basicLaticCharacters[symbol]); } else { sb.Append(symbol); } } correctName = RemoveInvalidCharacters(sb.ToString(), allowerCharacters); } // if it is still empty string, throw if (correctName.IsNullOrEmpty()) { throw new ArgumentException(string.Format(CultureInfo.InvariantCulture, Resources.InvalidIdentifierName, name)); } return correctName; } /// <summary> /// Removes invalid characters from the name. /// </summary> /// <param name="name">String to parse.</param> /// <param name="allowerCharacters">Allowed characters.</param> /// <returns>Name with invalid characters removed.</returns> private static string RemoveInvalidCharacters(string name, params char[] allowerCharacters) { return new string(name.Replace("[]", "Sequence") .Where(c => char.IsLetterOrDigit(c) || allowerCharacters.Contains(c)) .ToArray()); } /// <summary> /// If the provided name is a reserved word in a programming language then the method converts the /// name by appending the provided appendValue /// </summary> /// <param name="name">Name.</param> /// <param name="appendValue">String to append.</param> /// <returns>The transformed reserved name</returns> protected virtual string GetEscapedReservedName(string name, string appendValue) { if (name == null) { throw new ArgumentNullException("name"); } if (appendValue == null) { throw new ArgumentNullException("appendValue"); } if (ReservedWords.Contains(name, StringComparer.OrdinalIgnoreCase)) { name += appendValue; } return name; } /// <summary> /// Resolves name collisions in the client model by iterating over namespaces (if provided, /// model names, client name, and client method groups. 
/// </summary> /// <param name="serviceClient">Service client to process.</param> /// <param name="clientNamespace">Client namespace or null.</param> /// <param name="modelNamespace">Client model namespace or null.</param> public virtual void ResolveNameCollisions(ServiceClient serviceClient, string clientNamespace, string modelNamespace) { if (serviceClient == null) { throw new ArgumentNullException("serviceClient"); } // take all namespaces of Models var exclusionListQuery = SplitNamespaceAndIgnoreLast(modelNamespace) .Union(SplitNamespaceAndIgnoreLast(clientNamespace)); var exclusionDictionary = new Dictionary<string, string>(exclusionListQuery .Where(s => !string.IsNullOrWhiteSpace(s)) .ToDictionary(s => s, v => "namespace"), StringComparer.OrdinalIgnoreCase); var models = new List<CompositeType>(serviceClient.ModelTypes); serviceClient.ModelTypes.Clear(); foreach (var model in models) { model.Name = ResolveNameConflict( exclusionDictionary, model.Name, "Schema definition", "Model"); serviceClient.ModelTypes.Add(model); foreach (var property in model.Properties) { if (property.Name.Equals(model.Name, StringComparison.OrdinalIgnoreCase)) { property.Name += "Property"; } } } var enumTypes = new List<EnumType>(serviceClient.EnumTypes); serviceClient.EnumTypes.Clear(); foreach (var enumType in enumTypes) { enumType.Name = ResolveNameConflict( exclusionDictionary, enumType.Name, "Enum name", "Enum"); serviceClient.EnumTypes.Add(enumType); } serviceClient.Name = ResolveNameConflict( exclusionDictionary, serviceClient.Name, "Client", "Client"); ResolveMethodGroupNameCollision(serviceClient, exclusionDictionary); } /// <summary> /// Resolves name collisions in the client model for method groups (operations). /// </summary> /// <param name="serviceClient"></param> /// <param name="exclusionDictionary"></param> protected virtual void ResolveMethodGroupNameCollision(ServiceClient serviceClient, Dictionary<string, string> exclusionDictionary) { if (serviceClient == null) { throw new ArgumentNullException("serviceClient"); } if (exclusionDictionary == null) { throw new ArgumentNullException("exclusionDictionary"); } var methodGroups = serviceClient.MethodGroups.ToList(); foreach (var methodGroup in methodGroups) { var resolvedName = ResolveNameConflict( exclusionDictionary, methodGroup, "Client operation", "Operations"); foreach (var method in serviceClient.Methods) { if (method.Group == methodGroup) { method.Group = resolvedName; } } } } private static string ResolveNameConflict( Dictionary<string, string> exclusionDictionary, string typeName, string type, string suffix) { string resolvedName = typeName; var sb = new StringBuilder(); sb.AppendLine(); while (exclusionDictionary.ContainsKey(resolvedName)) { sb.AppendLine(string.Format(CultureInfo.InvariantCulture, Resources.NamespaceConflictReasonMessage, resolvedName, exclusionDictionary[resolvedName])); resolvedName += suffix; } if (!string.Equals(resolvedName, typeName, StringComparison.OrdinalIgnoreCase)) { sb.AppendLine(Resources.NamingConflictsSuggestion); Logger.LogWarning( string.Format( CultureInfo.InvariantCulture, Resources.EntityConflictTitleMessage, type, typeName, resolvedName, sb)); } exclusionDictionary.Add(resolvedName, type); return resolvedName; } private static IEnumerable<string> SplitNamespaceAndIgnoreLast(string nameSpace) { if (string.IsNullOrEmpty(nameSpace)) { return Enumerable.Empty<string>(); } var namespaceWords = nameSpace.Split(new[] {'.'}, StringSplitOptions.RemoveEmptyEntries); if (namespaceWords.Length < 1) { return 
Enumerable.Empty<string>(); } // else we do not need the last part of the namespace return namespaceWords.Take(namespaceWords.Length - 1); } [SuppressMessage("Microsoft.Performance", "CA1810:InitializeReferenceTypeStaticFieldsInline")] static CodeNamer() { basicLaticCharacters = new Dictionary<char, string>(); basicLaticCharacters[(char)32] = "Space"; basicLaticCharacters[(char)33] = "ExclamationMark"; basicLaticCharacters[(char)34] = "QuotationMark"; basicLaticCharacters[(char)35] = "NumberSign"; basicLaticCharacters[(char)36] = "DollarSign"; basicLaticCharacters[(char)37] = "PercentSign"; basicLaticCharacters[(char)38] = "Ampersand"; basicLaticCharacters[(char)39] = "Apostrophe"; basicLaticCharacters[(char)40] = "LeftParenthesis"; basicLaticCharacters[(char)41] = "RightParenthesis"; basicLaticCharacters[(char)42] = "Asterisk"; basicLaticCharacters[(char)43] = "PlusSign"; basicLaticCharacters[(char)44] = "Comma"; basicLaticCharacters[(char)45] = "HyphenMinus"; basicLaticCharacters[(char)46] = "FullStop"; basicLaticCharacters[(char)47] = "Slash"; basicLaticCharacters[(char)48] = "Zero"; basicLaticCharacters[(char)49] = "One"; basicLaticCharacters[(char)50] = "Two"; basicLaticCharacters[(char)51] = "Three"; basicLaticCharacters[(char)52] = "Four"; basicLaticCharacters[(char)53] = "Five"; basicLaticCharacters[(char)54] = "Six"; basicLaticCharacters[(char)55] = "Seven"; basicLaticCharacters[(char)56] = "Eight"; basicLaticCharacters[(char)57] = "Nine"; basicLaticCharacters[(char)58] = "Colon"; basicLaticCharacters[(char)59] = "Semicolon"; basicLaticCharacters[(char)60] = "LessThanSign"; basicLaticCharacters[(char)61] = "EqualSign"; basicLaticCharacters[(char)62] = "GreaterThanSign"; basicLaticCharacters[(char)63] = "QuestionMark"; basicLaticCharacters[(char)64] = "AtSign"; basicLaticCharacters[(char)91] = "LeftSquareBracket"; basicLaticCharacters[(char)92] = "Backslash"; basicLaticCharacters[(char)93] = "RightSquareBracket"; basicLaticCharacters[(char)94] = "CircumflexAccent"; basicLaticCharacters[(char)95] = "LowLine"; basicLaticCharacters[(char)96] = "GraveAccent"; basicLaticCharacters[(char)123] = "LeftCurlyBracket"; basicLaticCharacters[(char)124] = "VerticalBar"; basicLaticCharacters[(char)125] = "RightCurlyBracket"; basicLaticCharacters[(char)126] = "Tilde"; } } }
1
21,402
This belongs in SwaggerModeler.cs, in `public override ServiceClient Build()`.
Azure-autorest
java
@@ -286,9 +286,11 @@ export default () => { * Defines column widths in pixels. Accepts number, string (that will be converted to a number), array of numbers * (if you want to define column width separately for each column) or a function (if you want to set column width * dynamically on each render). + * + * Note that this option will disable {@link AutoColumnSize} plugin. * * @memberof Options# - * @type {number|number[]|string|string[]|Function} + * @type {number|(number|undefined)[]|string|(string|undefined)[]|Function} * @default undefined * * @example
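To make the documented change above concrete, here is a minimal, hedged usage sketch of `colWidths` as described in the patch: the array form may contain `undefined` entries (those columns keep the default width, hence the `(number|undefined)[]` type), and, per the added note, defining `colWidths` disables the `AutoColumnSize` plugin. The container id and data are hypothetical, and the snippet assumes Handsontable is already loaded on the page.

```js
// Sketch only – container id and data below are hypothetical.
const container = document.getElementById('example');

const hot = new Handsontable(container, {
  data: [
    ['A1', 'B1', 'C1'],
    ['A2', 'B2', 'C2'],
  ],
  // Array form, based on visual indexes: columns with an `undefined`
  // entry fall back to the default width (the `(number|undefined)[]`
  // type introduced in the patch above).
  // Per the added note, defining `colWidths` disables the
  // AutoColumnSize plugin.
  colWidths: [100, undefined, 90],
});
```

The same option also accepts a single number or string applied to every column, or a function of the visual column index, as shown in the option's own `@example` block further below.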
1
import { isDefined } from '../../helpers/mixed'; import { isObjectEqual } from '../../helpers/object'; /* eslint-disable jsdoc/require-description-complete-sentence */ /** * @alias Options * @class Options * @description * * ## Constructor options. * * Constructor options are applied using an object literal passed as a second argument to the Handsontable constructor. * * ```js * const container = document.getElementById('example'); * const hot = new Handsontable(container, { * data: myArray, * width: 400, * height: 300 * }); * ``` * * --- * ## Cascading configuration. * * Handsontable is using *Cascading Configuration*, which is a fast way to provide configuration options * for the entire table, including its columns and particular cells. * * Consider the following example: * ```js * const container = document.getElementById('example'); * const hot = new Handsontable(container, { * readOnly: true, * columns: [ * {readOnly: false}, * {}, * {}, * ], * cells: function(row, col, prop) { * var cellProperties = {}; * * if (row === 0 && col === 0) { * cellProperties.readOnly = true; * }. * * return cellProperties; * } * }); * ``` * * The above notation will result in all TDs being *read only*, except for first column TDs which will be *editable*, except for the TD in top left corner which will still be *read only*. * * ### The Cascading Configuration model * * ##### 1. Constructor * * Configuration options that are provided using first-level `handsontable(container, {option: "value"})` and `updateSettings` method. * * ##### 2. Columns * * Configuration options that are provided using second-level object `handsontable(container, {columns: {option: "value"}]})` * * ##### 3. Cells * * Configuration options that are provided using third-level function `handsontable(container, {cells: function: (row, col, prop){ }})` * * --- * ## Architecture performance * * The Cascading Configuration model is based on prototypical inheritance. It is much faster and memory efficient * compared to the previous model that used jQuery extend. See: [http://jsperf.com/extending-settings](http://jsperf.com/extending-settings). * * --- * __Important notice:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`. */ /* eslint-enable jsdoc/require-description-complete-sentence */ export default () => { return { /** * License key for commercial version of Handsontable. * * @memberof Options# * @type {string} * @default undefined * @example * ```js * licenseKey: '00000-00000-00000-00000-00000', * // or * licenseKey: 'non-commercial-and-evaluation', * ``` */ licenseKey: void 0, /** * @description * Initial data source that will be bound to the data grid __by reference__ (editing data grid alters the data source). * Can be declared as an array of arrays or an array of objects. * * See [Understanding binding as reference](https://docs.handsontable.com/tutorial-data-binding.html#page-reference). * * @memberof Options# * @type {Array[]|object[]} * @default undefined * @example * ```js * // as an array of arrays * data: [ * ['A', 'B', 'C'], * ['D', 'E', 'F'], * ['G', 'H', 'J'] * ] * * // as an array of objects * data: [ * {id: 1, name: 'Ted Right'}, * {id: 2, name: 'Frank Honest'}, * {id: 3, name: 'Joan Well'}, * {id: 4, name: 'Gail Polite'}, * {id: 5, name: 'Michael Fair'}, * ] * ``` */ data: void 0, /** * @description * Defines the structure of a new row when data source is an array of objects. 
* * See [data-schema](https://docs.handsontable.com/tutorial-data-sources.html#page-data-schema) for more options. * * @memberof Options# * @type {object} * @default undefined * * @example * ``` * // with data schema we can start with an empty table * data: null, * dataSchema: {id: null, name: {first: null, last: null}, address: null}, * colHeaders: ['ID', 'First Name', 'Last Name', 'Address'], * columns: [ * {data: 'id'}, * {data: 'name.first'}, * {data: 'name.last'}, * {data: 'address'} * ], * startRows: 5, * minSpareRows: 1 * ``` */ dataSchema: void 0, /** * Width of the grid. Can be a value or a function that returns a value. * * @memberof Options# * @type {number|string|Function} * @default undefined * * @example * ``` * // as a number * width: 500, * * // as a string * width: '75vw', * * // as a function * width: function() { * return 500; * }, * ``` */ width: void 0, /** * Height of the grid. Can be a number or a function that returns a number. * * @memberof Options# * @type {number|string|Function} * @default undefined * * @example * ```js * // as a number * height: 500, * * // as a string * height: '75vh', * * // as a function * height: function() { * return 500; * }, * ``` */ height: void 0, /** * @description * Initial number of rows. * * __Note:__ This option only has effect in Handsontable constructor and only if `data` option is not provided. * * @memberof Options# * @type {number} * @default 5 * * @example * ```js * // start with 15 empty rows * startRows: 15, * ``` */ startRows: 5, /** * @description * Initial number of columns. * * __Note:__ This option only has effect in Handsontable constructor and only if `data` option is not provided. * * @memberof Options# * @type {number} * @default 5 * * @example * ```js * // start with 15 empty columns * startCols: 15, * ``` */ startCols: 5, /** * Setting `true` or `false` will enable or disable the default row headers (1, 2, 3). * You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers. * If a function is set the index of the row is passed as a parameter. * * @memberof Options# * @type {boolean|string[]|Function} * @default undefined * * @example * ```js * // as a boolean * rowHeaders: true, * * // as an array * rowHeaders: ['1', '2', '3'], * * // as a function * rowHeaders: function(index) { * return index + ': AB'; * }, * ``` */ rowHeaders: void 0, /** * Setting `true` or `false` will enable or disable the default column headers (A, B, C). * You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers. * If a function is set, then the index of the column is passed as a parameter. * * @memberof Options# * @type {boolean|string[]|Function} * @default null * * @example * ```js * // as a boolean * colHeaders: true, * * // as an array * colHeaders: ['A', 'B', 'C'], * * // as a function * colHeaders: function(index) { * return index + ': AB'; * }, * ``` */ colHeaders: null, /** * Defines column widths in pixels. Accepts number, string (that will be converted to a number), array of numbers * (if you want to define column width separately for each column) or a function (if you want to set column width * dynamically on each render). * * @memberof Options# * @type {number|number[]|string|string[]|Function} * @default undefined * * @example * ```js * // as a number, for each column. * colWidths: 100, * * // as a string, for each column. * colWidths: '100px', * * // as an array, based on visual indexes. The rest of the columns have a default width. 
* colWidths: [100, 120, 90], * * // as a function, based on visual indexes. * colWidths: function(index) { * return index * 10; * }, * ``` */ colWidths: void 0, /** * Defines row heights in pixels. Accepts numbers, strings (that will be converted into a number), array of numbers * (if you want to define row height separately for each row) or a function (if you want to set row height dynamically * on each render). * * If the {@link ManualRowResize} or {@link AutoRowSize} plugins are enabled, this is also the minimum height that can * be set via either of those two plugins. * * Height should be equal or greater than 23px. Table is rendered incorrectly if height is less than 23px. * * @memberof Options# * @type {number|number[]|string|string[]|Function} * @default undefined * * @example * ```js * // as a number, the same for all rows * rowHeights: 100, * * // as a string, the same for all row * rowHeights: '100px', * * // as an array, based on visual indexes. The rest of the rows have a default height * rowHeights: [100, 120, 90], * * // as a function, based on visual indexes * rowHeights: function(index) { * return index * 10; * }, * ``` */ rowHeights: void 0, /** * @description * Defines the cell properties and data binding for certain columns. * * __Note:__ Using this option sets a fixed number of columns (options `startCols`, `minCols`, `maxCols` will be ignored). * * See [documentation -> datasources.html](https://docs.handsontable.com/tutorial-data-sources.html#page-nested) for examples. * * @memberof Options# * @type {object[]|Function} * @default undefined * * @example * ```js * // as an array of objects * // order of the objects in array is representation of physical indexes. * columns: [ * { * // column options for the first column * type: 'numeric', * numericFormat: { * pattern: '0,0.00 $' * } * }, * { * // column options for the second column * type: 'text', * readOnly: true * } * ], * * // or as a function, based on physical indexes * columns: function(index) { * return { * type: index > 0 ? 'numeric' : 'text', * readOnly: index < 1 * } * } * ``` */ columns: void 0, /** * @description * Defines the cell properties for given `row`, `col`, `prop` coordinates. Any constructor or column option may be * overwritten for a particular cell (row/column combination) using the `cells` property in the Handsontable constructor. * * __Note:__ Parameters `row` and `col` always represent __physical indexes__. Example below show how to execute * operations based on the __visual__ representation of Handsontable. * * Possible values of `prop`: * - property name for column's data source object, when dataset is an [array of objects](https://handsontable.com/docs/tutorial-data-sources.html#page-object) * - the same number as `col`, when dataset is an [array of arrays](https://handsontable.com/docs/tutorial-data-sources.html#page-array). * * @memberof Options# * @type {Function} * @default undefined * * @example * ```js * cells: function(row, column, prop) { * const cellProperties = { readOnly: false }; * const visualRowIndex = this.instance.toVisualRow(row); * const visualColIndex = this.instance.toVisualColumn(column); * * if (visualRowIndex === 0 && visualColIndex === 0) { * cellProperties.readOnly = true; * } * * return cellProperties; * }, * ``` */ cells: void 0, /** * Any constructor or column option may be overwritten for a particular cell (row/column combination), using `cell` * array passed to the Handsontable constructor. 
* * @memberof Options# * @type {Array[]} * @default [] * * @example * ```js * // make cell with coordinates (0, 0) read only * cell: [ * { * row: 0, * col: 0, * readOnly: true * } * ], * ``` */ cell: [], /** * @description * If `true`, enables the {@link Comments} plugin, which enables an option to apply cell comments through the context menu * (configurable with context menu keys `commentsAddEdit`, `commentsRemove`). * * To initialize Handsontable with predefined comments, provide cell coordinates and comment text values in a form of * an array. * * See [Comments](https://docs.handsontable.com/demo-comments_.html) demo for examples. * * @memberof Options# * @type {boolean|object[]} * @default false * * @example * ```js * // enable comments plugin * comments: true, * * // or an object with extra predefined plugin config: * * comments: { * displayDelay: 1000 * } * * // or * // enable comments plugin and add predefined comments * const hot = new Handsontable(document.getElementById('example'), { * data: getData(), * comments: true, * cell: [ * { row: 1, col: 1, comment: { value: 'Foo' } }, * { row: 2, col: 2, comment: { value: 'Bar' } } * ] * }); * ``` */ comments: false, /** * @description * If `true`, enables the {@link CustomBorders} plugin, which enables an option to apply custom borders through the context * menu (configurable with context menu key `borders`). To initialize Handsontable with predefined custom borders, * provide cell coordinates and border styles in a form of an array. * * See [Custom Borders](https://docs.handsontable.com/demo-custom-borders.html) demo for examples. * * @memberof Options# * @type {boolean|object[]} * @default false * * @example * ```js * // enable custom borders * customBorders: true, * * // or * // enable custom borders and start with predefined left border * customBorders: [ * { * range: { * from: { * row: 1, * col: 1 * }, * to: { * row: 3, * col: 4 * } * }, * left: { * width: 2, * color: 'red' * }, * right: {}, * top: {}, * bottom: {} * } * ], * * // or * customBorders: [ * { * row: 2, * col: 2, * left: { * width: 2, * color: 'red' * }, * right: { * width: 1, * color: 'green' * }, * top: '', * bottom: '' * } * ], * ``` */ customBorders: false, /** * Minimum number of rows. At least that number of rows will be created during initialization. * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // set minimum table size to 10 rows * minRows: 10, * ``` */ minRows: 0, /** * Minimum number of columns. At least that number of columns will be created during initialization. * Works only with an array data source. When data source in an object, you can only have as many columns * as defined in the first data row, data schema, or the `columns` setting. * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // set minimum table size to 10 columns * minCols: 10, * ``` */ minCols: 0, /** * Maximum number of rows. If set to a value lower than the initial row count, the data will be trimmed to the provided * value as the number of rows. * * @memberof Options# * @type {number} * @default Infinity * * @example * ```js * // limit table size to maximum 300 rows * maxRows: 300, * ``` */ maxRows: Infinity, /** * Maximum number of cols. If set to a value lower than the initial col count, the data will be trimmed to the provided * value as the number of cols. 
* * @memberof Options# * @type {number} * @default Infinity * * @example * ```js * // limit table size to maximum 300 columns * maxCols: 300, * ``` */ maxCols: Infinity, /** * When set to 1 (or more), Handsontable will add a new row at the end of grid if there are no more empty rows. * (unless the number of rows exceeds the one set in the `maxRows` property). * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // always add 3 empty rows at the table end * minSpareRows: 3, * ``` */ minSpareRows: 0, /** * When set to 1 (or more), Handsontable will add a new column at the end of grid if there are no more empty columns. * (unless the number of rows exceeds the one set in the `maxCols` property). * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // always add 3 empty columns at the table end * minSpareCols: 3, * ``` */ minSpareCols: 0, /** * If set to `false`, there won't be an option to insert new rows in the Context Menu. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // hide "Insert row above" and "Insert row below" options from the Context Menu * allowInsertRow: false, * ``` */ allowInsertRow: true, /** * If set to `false`, there won't be an option to insert new columns in the Context Menu. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // hide "Insert column left" and "Insert column right" options from the Context Menu * allowInsertColumn: false, * ``` */ allowInsertColumn: true, /** * If set to `false`, there won't be an option to remove rows in the Context Menu. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // hide "Remove row" option from the Context Menu * allowRemoveRow: false, * ``` */ allowRemoveRow: true, /** * If set to `false`, there won't be an option to remove columns in the Context Menu. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // hide "Remove column" option from the Context Menu * allowRemoveColumn: false, * ``` */ allowRemoveColumn: true, /** * @description * Defines how the table selection reacts. The selection support three different behaviors defined as: * * `'single'` Only a single cell can be selected. * * `'range'` Multiple cells within a single range can be selected. * * `'multiple'` Multiple ranges of cells can be selected. * * To see how to interact with selection by getting selected data or change styles of the selected cells go to * [https://docs.handsontable.com/demo-selecting-ranges.html](https://docs.handsontable.com/demo-selecting-ranges.html). * * @memberof Options# * @type {string} * @default 'multiple' * * @example * ```js * // only one cell can be selected at a time * selectionMode: 'single', * ``` */ selectionMode: 'multiple', /** * Enables the fill handle (drag-down and copy-down) functionality, which shows a small rectangle in bottom * right corner of the selected area, that let's you expand values to the adjacent cells. * * Setting to `true` enables the fillHandle plugin. Possible values: `true` (to enable in all directions), * `'vertical'` or `'horizontal'` (to enable in one direction), `false` (to disable completely), an object with * options: `autoInsertRow`, `direction`. * * If `autoInsertRow` option is `true`, fill-handler will create new rows till it reaches the last row. * It is enabled by default. 
* * @memberof Options# * @type {boolean|string|object} * @default true * * @example * ```js * // enable plugin in all directions and with autoInsertRow as true * fillHandle: true, * * // or * // enable plugin in vertical direction and with autoInsertRow as true * fillHandle: 'vertical', * * // or * fillHandle: { * // enable plugin in both directions and with autoInsertRow as false * autoInsertRow: false, * }, * * // or * fillHandle: { * // enable plugin in vertical direction and with autoInsertRow as false * autoInsertRow: false, * direction: 'vertical' * }, * ``` */ fillHandle: { autoInsertRow: false, }, /** * Allows to specify the number of fixed (or *frozen*) rows at the top of the table. * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // freeze the first 3 rows of the table. * fixedRowsTop: 3, * ``` */ fixedRowsTop: 0, /** * Allows to specify the number of fixed (or *frozen*) rows at the bottom of the table. * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // freeze the last 3 rows of the table. * fixedRowsBottom: 3, * ``` */ fixedRowsBottom: 0, /** * Allows to specify the number of fixed (or *frozen*) columns on the left of the table. * * @memberof Options# * @type {number} * @default 0 * * @example * ```js * // freeze first 3 columns of the table. * fixedColumnsLeft: 3, * ``` */ fixedColumnsLeft: 0, /** * If `true`, mouse click outside the grid will deselect the current selection. Can be a function that takes the * click event target and returns a boolean. * * @memberof Options# * @type {boolean|Function} * @default true * * @example * ```js * // don't clear current selection when mouse click was outside the grid * outsideClickDeselects: false, * * // or * outsideClickDeselects: function(event) { * return false; * } * ``` */ outsideClickDeselects: true, /** * If `true`, <kbd>ENTER</kbd> begins editing mode (like in Google Docs). If `false`, <kbd>ENTER</kbd> moves to next * row (like Excel) and adds a new row if necessary. <kbd>TAB</kbd> adds new column if necessary. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * enterBeginsEditing: false, * ``` */ enterBeginsEditing: true, /** * Defines the cursor movement after <kbd>ENTER</kbd> was pressed (<kbd>SHIFT</kbd> + <kbd>ENTER</kbd> uses a negative vector). Can * be an object or a function that returns an object. The event argument passed to the function is a DOM Event object * received after the <kbd>ENTER</kbd> key has been pressed. This event object can be used to check whether user pressed * <kbd>ENTER</kbd> or <kbd>SHIFT</kbd> + <kbd>ENTER</kbd>. * * @memberof Options# * @type {object|Function} * @default {col: 0, row: 1} * * @example * ```js * // move selection diagonal by 1 cell in x and y axis * enterMoves: {col: 1, row: 1}, * // or as a function * enterMoves: function(event) { * return {col: 1, row: 1}; * }, * ``` */ enterMoves: { col: 0, row: 1 }, /** * Defines the cursor movement after <kbd>TAB</kbd> is pressed (<kbd>SHIFT</kbd> + <kbd>TAB</kbd> uses a negative vector). Can * be an object or a function that returns an object. The event argument passed to the function is a DOM Event object * received after the <kbd>TAB</kbd> key has been pressed. This event object can be used to check whether user pressed * <kbd>TAB</kbd> or <kbd>SHIFT</kbd> + <kbd>TAB</kbd>. * * @memberof Options# * @type {object|Function} * @default {row: 0, col: 1} * * @example * ```js * // move selection 2 cells away after TAB pressed. 
* tabMoves: {row: 2, col: 2}, * // or as a function * tabMoves: function(event) { * return {row: 2, col: 2}; * }, * ``` */ tabMoves: { row: 0, col: 1 }, /** * If `true`, pressing <kbd>TAB</kbd> or right arrow in the last column will move to first column in next row. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // stop TAB key navigation on the last column * autoWrapRow: false, * ``` */ autoWrapRow: true, /** * If `true`, pressing <kbd>ENTER</kbd> or down arrow in the last row will move to the first row in the next column. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // stop ENTER key navigation on the last row * autoWrapCol: false, * ``` */ autoWrapCol: true, /** * @description * Turns on saving the state of column sorting, column positions and column sizes in local storage. * * You can save any sort of data in local storage to preserve table state between page reloads. In order to enable * data storage mechanism, `persistentState` option must be set to `true` (you can set it either during Handsontable * initialization or using the `updateSettings` method). When `persistentState` is enabled it exposes 3 hooks: * * __persistentStateSave__ (key: String, value: Mixed). * * * Saves value under given key in browser local storage. * * __persistentStateLoad__ (key: String, valuePlaceholder: Object). * * * Loads `value`, saved under given key, form browser local storage. The loaded `value` will be saved in * `valuePlaceholder.value` (this is due to specific behaviour of `Hooks.run()` method). If no value have * been saved under key `valuePlaceholder.value` will be `undefined`. * * __persistentStateReset__ (key: String). * * * Clears the value saved under `key`. If no `key` is given, all values associated with table will be cleared. * * __Note:__ The main reason behind using `persistentState` hooks rather than regular LocalStorage API is that it * ensures separation of data stored by multiple Handsontable instances. In other words, if you have two (or more) * instances of Handsontable on one page, data saved by one instance won't be accessible by the second instance. * Those two instances can store data under the same key and no data would be overwritten. * * __Important:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * // enable the persistent state plugin * persistentState: true, * ``` */ persistentState: void 0, /** * Class name for all visible rows in the current selection. * * @memberof Options# * @type {string} * @default undefined * * @example * ```js * // This will add a 'currentRow' class name to appropriate table cells. * currentRowClassName: 'currentRow', * ``` */ currentRowClassName: void 0, /** * Class name for all visible columns in the current selection. * * @memberof Options# * @type {string} * @default undefined * * @example * ```js * // This will add a 'currentColumn' class name to appropriate table cells. * currentColClassName: 'currentColumn', * ``` */ currentColClassName: void 0, /** * Class name for all visible headers in current selection. * * @memberof Options# * @type {string} * @default 'ht__highlight' * * @example * ```js * // This will add a 'ht__highlight' class name to appropriate table headers. * currentHeaderClassName: 'ht__highlight', * ``` */ currentHeaderClassName: 'ht__highlight', /** * Class name for all active headers in selections. 
The header will be marked with this class name * only when a whole column or row will be selected. * * @memberof Options# * @type {string} * @since 0.38.2 * @default 'ht__active_highlight' * * @example * ```js * // this will add a 'ht__active_highlight' class name to appropriate table headers. * activeHeaderClassName: 'ht__active_highlight', * ``` */ activeHeaderClassName: 'ht__active_highlight', /** * Class name for the current element. * The interpretation depends on the level on which this option is provided in the [cascading configuration](https://handsontable.com/docs/Options.html). * If `className` is provided on the first (constructor) level, it is the applied to the Handsontable container. * If `className` is provided on the second (`column`) or the third (`cell` or `cells`) level, it is applied to the table cell. * * @memberof Options# * @type {string|string[]} * @default undefined * * @example * ```js * // can be set as a string * className: 'your__class--name', * * // or as an array of strings * className: ['first-class-name', 'second-class-name'], * ``` */ className: void 0, /** * Class name for all tables inside container element. * * @memberof Options# * @type {string|string[]} * @default undefined * * @example * ```js * // set custom class for table element * tableClassName: 'your__class--name', * * // or * tableClassName: ['first-class-name', 'second-class-name'], * ``` */ tableClassName: void 0, /** * @description * Defines how the columns react, when the declared table width is different than the calculated sum of all column widths. * [See more](https://docs.handsontable.com/demo-stretching.html) mode. Possible values: * * `'none'` Disable stretching * * `'last'` Stretch only the last column * * `'all'` Stretch all the columns evenly. * * @memberof Options# * @type {string} * @default 'none' * * @example * ```js * // fit table to the container * stretchH: 'all', * ``` */ stretchH: 'none', /** * Overwrites the default `isEmptyRow` method, which checks if row at the provided index is empty. * * @memberof Options# * @type {Function} * @param {number} row Visual row index. * @returns {boolean} * * @example * ```js * // define custom checks for empty row * isEmptyRow: function(row) { * ... * }, * ``` */ isEmptyRow(row) { let col; let colLen; let value; let meta; for (col = 0, colLen = this.countCols(); col < colLen; col++) { value = this.getDataAtCell(row, col); if (value !== '' && value !== null && isDefined(value)) { if (typeof value === 'object') { meta = this.getCellMeta(row, col); return isObjectEqual(this.getSchema()[meta.prop], value); } return false; } } return true; }, /** * Overwrites the default `isEmptyCol` method, which checks if column at the provided index is empty. * * @memberof Options# * @type {Function} * @param {number} col Visual column index. * @returns {boolean} * * @example * ```js * // define custom checks for empty column * isEmptyCol: function(column) { * return false; * }, * ``` */ isEmptyCol(col) { let row; let rowLen; let value; for (row = 0, rowLen = this.countRows(); row < rowLen; row++) { value = this.getDataAtCell(row, col); if (value !== '' && value !== null && isDefined(value)) { return false; } } return true; }, /** * When set to `true`, the table is re-rendered when it is detected that it was made visible in DOM. 
* * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // don't rerender the table on visibility changes * observeDOMVisibility: false, * ``` */ observeDOMVisibility: true, /** * If set to `true`, Handsontable will accept values that were marked as invalid by the cell `validator`. It will * result with *invalid* cells being treated as *valid* (will save the *invalid* value into the Handsontable data source). * If set to `false`, Handsontable will *not* accept the invalid values and won't allow the user to close the editor. * This option will be particularly useful when used with the Autocomplete's `strict` mode. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // don't save the invalid values * allowInvalid: false, * ``` */ allowInvalid: true, /** * If set to `true`, Handsontable will accept values that are empty (`null`, `undefined` or `''`). If set * to `false`, Handsontable will *not* accept the empty values and mark cell as invalid. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // allow empty values for all cells (whole table) * allowEmpty: true, * * // or * columns: [ * { * data: 'date', * dateFormat: 'DD/MM/YYYY', * // allow empty values only for the 'date' column * allowEmpty: true * } * ], * ``` */ allowEmpty: true, /** * CSS class name for cells that did not pass validation. * * @memberof Options# * @type {string} * @default 'htInvalid' * * @example * ```js * // set custom validation error class * invalidCellClassName: 'highlight--error', * ``` */ invalidCellClassName: 'htInvalid', /** * When set to an non-empty string, displayed as the cell content for empty cells. If a value of a different type is provided, * it will be stringified and applied as a string. * * @memberof Options# * @type {string} * @default undefined * * @example * ```js * // add custom placeholder content to empty cells * placeholder: 'Empty Cell', * ``` */ placeholder: void 0, /** * CSS class name for cells that have a placeholder in use. * * @memberof Options# * @type {string} * @default 'htPlaceholder' * * @example * ```js * // set custom placeholder class * placeholderCellClassName: 'has-placeholder', * ``` */ placeholderCellClassName: 'htPlaceholder', /** * CSS class name for read-only cells. * * @memberof Options# * @type {string} * @default 'htDimmed' * * @example * ```js * // set custom read-only class * readOnlyCellClassName: 'is-readOnly', * ``` */ readOnlyCellClassName: 'htDimmed', /* eslint-disable jsdoc/require-description-complete-sentence */ /** * @description * If a string is provided, it may be one of the following predefined values: * * `autocomplete`, * * `checkbox`, * * `html`, * * `numeric`, * * `password`. * * `text`. * * Or you can [register](https://docs.handsontable.com/demo-custom-renderers.html) the custom renderer under specified name and use its name as an alias in your * configuration. * * If a function is provided, it will receive the following arguments: * ```js * function(instance, TD, row, col, prop, value, cellProperties) {} * ``` * * You can read more about custom renderes [in the documentation](https://docs.handsontable.com/demo-custom-renderers.html). 
* * @memberof Options# * @type {string|Function} * @default undefined * * @example * ```js * // register custom renderer * Handsontable.renderers.registerRenderer('my.renderer', function(instance, TD, row, col, prop, value, cellProperties) { * TD.innerHTML = value; * }); * * // use it for selected column: * columns: [ * { * // as a string with the name of build in renderer * renderer: 'autocomplete', * editor: 'select' * }, * { * // as an alias to custom renderer registered above * renderer: 'my.renderer' * }, * { * // renderer as custom function * renderer: function(hotInstance, TD, row, col, prop, value, cellProperties) { * TD.style.color = 'blue'; * TD.innerHTML = value; * } * } * ], * ``` */ renderer: void 0, /* eslint-enable jsdoc/require-description-complete-sentence */ /** * CSS class name added to the commented cells. * * @memberof Options# * @type {string} * @default 'htCommentCell' * * @example * ```js * // set custom class for commented cells * commentedCellClassName: 'has-comment', * ``` */ commentedCellClassName: 'htCommentCell', /** * If set to `true`, it enables the browser's native selection of a fragment of the text within a single cell, between * adjacent cells or in a whole table. If set to `'cell'`, it enables the possibility of selecting a fragment of the * text within a single cell's body. * * @memberof Options# * @type {boolean|string} * @default false * * @example * ```js * // enable text selection within table * fragmentSelection: true, * * // or * // enable text selection within cells only * fragmentSelection: 'cell', * ``` */ fragmentSelection: false, /** * @description * Makes cell, column or comment [read only](https://docs.handsontable.com/demo-read-only.html). * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * // set as read only * readOnly: true, * ``` */ readOnly: false, /** * @description * When added to a `column` property, it skips the column on paste and pastes the data on the next column to the right. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * columns: [ * { * // don't paste data to this column * skipColumnOnPaste: true * } * ], * ``` */ skipColumnOnPaste: false, /** * @description * When added to a cell property, it skips the row on paste and pastes the data on the following row. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * cells: function(row, column) { * const cellProperties = {}; * * // don't paste data to the second row * if (row === 1) { * cellProperties.skipRowOnPaste = true; * } * * return cellProperties; * } * ``` */ skipRowOnPaste: false, /** * @description * Setting to `true` enables the {@link Search} plugin (see [demo](https://docs.handsontable.com/demo-search-for-values.html)). * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * // enable search plugin * search: true, * * // or * // as an object with detailed configuration * search: { * searchResultClass: 'customClass', * queryMethod: function(queryStr, value) { * ... * }, * callback: function(instance, row, column, value, result) { * ... * } * } * ``` */ search: false, /** * @description * Shortcut to define the combination of the cell renderer, editor and validator for the column, cell or whole table. 
* * Possible values: * * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html) * * [checkbox](https://docs.handsontable.com/demo-checkbox.html) * * [date](https://docs.handsontable.com/demo-date.html) * * [dropdown](https://docs.handsontable.com/demo-dropdown.html) * * [handsontable](https://docs.handsontable.com/demo-handsontable.html) * * [numeric](https://docs.handsontable.com/demo-numeric.html) * * [password](https://docs.handsontable.com/demo-password.html) * * text * * [time](https://docs.handsontable.com/demo-time.html). * * Or you can register the custom cell type under specified name and use * its name as an alias in your configuration. * * @memberof Options# * @type {string} * @default 'text' * * @example * ```js * // register custom cell type: * Handsontable.cellTypes.registerCellType('my.type', { * editor: MyEditorClass, * renderer: function(hot, td, row, col, prop, value, cellProperties) { * td.innerHTML = value; * }, * validator: function(value, callback) { * callback(value === 'foo' ? true : false); * } * }); * * // use it in column settings: * columns: [ * { * type: 'text' * }, * { * // an alias to custom type * type: 'my.type' * }, * { * type: 'checkbox' * } * ], * ``` */ type: 'text', /** * @description * Makes a cell copyable (pressing <kbd>CTRL</kbd> + <kbd>C</kbd> on your keyboard moves its value to system clipboard). * * __Note:__ this setting is `false` by default for cells with type `password`. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * cells: [ * { * cell: 0, * row: 0, * // cell with coordinates (0, 0) can't be copied * copyable: false, * } * ], * ``` */ copyable: true, /** * Defines the editor for the table/column/cell. * * If a string is provided, it may be one of the following predefined values: * * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html) * * [checkbox](https://docs.handsontable.com/demo-checkbox.html) * * [date](https://docs.handsontable.com/demo-date.html) * * [dropdown](https://docs.handsontable.com/demo-dropdown.html) * * [handsontable](https://docs.handsontable.com/demo-handsontable.html) * * [mobile](https://docs.handsontable.com/demo-mobiles-and-tablets.html) * * [password](https://docs.handsontable.com/demo-password.html) * * [select](https://docs.handsontable.com/demo-select.html) * * text. * * Or you can [register](https://docs.handsontable.com/tutorial-cell-editor.html#registering-an-editor) the custom editor under specified name and use its name as an alias in your * configuration. * * To disable cell editing completely set `editor` property to `false`. * * @memberof Options# * @type {string|Function|boolean} * @default undefined * * @example * ```js * columns: [ * { * // set editor for the first column * editor: 'select' * }, * { * // disable editor for the second column * editor: false * } * ], * ``` */ editor: void 0, /** * Control number of choices for the autocomplete (or dropdown) typed cells. After exceeding it, a scrollbar for the * dropdown list of choices will appear. * * @memberof Options# * @type {number} * @default 10 * * @example * ```js * columns: [ * { * type: 'autocomplete', * // set autocomplete options list height * visibleRows: 15, * } * ], * ``` */ visibleRows: 10, /** * Makes autocomplete or dropdown width the same as the edited cell width. If `false` then editor will be scaled * according to its content. 
* * @memberof Options# * @type {boolean} * @default true * * @example * ```js * columns: [ * { * type: 'autocomplete', * // don't trim dropdown width with column width * trimDropdown: false, * } * ], * ``` */ trimDropdown: true, /** * When set to `true`, the text of the cell content is wrapped if it does not fit in the fixed column width. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * colWidths: 100, * columns: [ * { * // fixed column width is set but don't wrap the content * wordWrap: false, * } * ], * ``` */ wordWrap: true, /** * CSS class name added to cells with cell meta `wordWrap: false`. * * @memberof Options# * @type {string} * @default 'htNoWrap' * * @example * ```js * // set custom class for cells which content won't be wrapped * noWordWrapClassName: 'is-noWrapCell', * ``` */ noWordWrapClassName: 'htNoWrap', /** * @description * Defines if the right-click context menu should be enabled. Context menu allows to create new row or column at any * place in the grid among [other features](https://docs.handsontable.com/demo-context-menu.html). * Possible values: * * `true` (to enable default options), * * `false` (to disable completely) * * an array of [predefined options](https://docs.handsontable.com/demo-context-menu.html#page-specific), * * an object [with defined structure](https://docs.handsontable.com/demo-context-menu.html#page-custom). * * If the value is an object, you can also customize the options with: * * `disableSelection` - a `boolean`, if set to true it prevents mouseover from highlighting the item for selection * * `isCommand` - a `boolean`, if set to false it prevents clicks from executing the command and closing the menu. * * See [the context menu demo](https://docs.handsontable.com/demo-context-menu.html) for examples. * * @memberof Options# * @type {boolean|string[]|object} * @default undefined * * @example * ```js * // as a boolean * contextMenu: true, * * // as an array * contextMenu: ['row_above', 'row_below', '---------', 'undo', 'redo'], * * // as an object (`name` attribute is required in the custom keys) * contextMenu: { * items: { * "option1": { * name: "option1" * }, * "option2": { * name: "option2", * submenu: { * items: [ * { * key: "option2:suboption1", * name: "option2:suboption1", * callback: function(key, options) { * ... * } * }, * ... * ] * } * } * } * }, * ``` */ contextMenu: void 0, /** * Disables or enables the copy/paste functionality. * * @memberof Options# * @type {object|boolean} * @default true * * @example * ```js * // disable copy and paste * copyPaste: false, * * // enable copy and paste with custom configuration * copyPaste: { * columnsLimit: 25, * rowsLimit: 50, * pasteMode: 'shift_down', * uiContainer: document.body, * }, * ``` */ copyPaste: true, /** * If `true`, undo/redo functionality is enabled. * Note: `undefined` by default but it acts as enabled. * You need to switch it to `false` to disable it completely. * * @memberof Options# * @type {boolean} * @default undefined * * @example * ```js * // enable undo and redo * undo: true, * ``` */ undo: void 0, /** * @description * Turns on [Column sorting](https://docs.handsontable.com/demo-sorting-data.html). 
Can be either a boolean (`true` / `false`) or an object with a declared sorting options: * * `initialConfig` - Object with predefined keys: * * `column` - sorted column * * `sortOrder` - order in which column will be sorted * * `'asc'` = ascending * * `'desc'` = descending * * `indicator` - display status for sorting order indicator (an arrow icon in the column header, specifying the sorting order). * * `true` = show sort indicator for sorted columns * * `false` = don't show sort indicator for sorted columns * * `headerAction` - allow to click on the headers to sort * * `true` = turn on possibility to click on the headers to sort * * `false` = turn off possibility to click on the headers to sort * * `sortEmptyCells` - how empty values should be handled * * `true` = the table sorts empty cells * * `false` = the table moves all empty cells to the end of the table * * `compareFunctionFactory` - curry function returning compare function; compare function should work in the same way as function which is handled by native `Array.sort` method); please take a look at below examples for more information. * * @memberof Options# * @type {boolean|object} * @default undefined * * @example * ```js * // as boolean * columnSorting: true * * // as an object with initial sort config (sort ascending for column at index 1) * columnSorting: { * initialConfig: { * column: 1, * sortOrder: 'asc' * } * } * * // as an object which define specific sorting options for all columns * columnSorting: { * sortEmptyCells: true, // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table * indicator: true, // true = shows indicator for all columns, false = don't show indicator for columns * headerAction: false, // true = allow to click on the headers to sort, false = turn off possibility to click on the headers to sort * compareFunctionFactory: function(sortOrder, columnMeta) { * return function(value, nextValue) { * // Some value comparisons which will return -1, 0 or 1... * } * } * }``` */ columnSorting: void 0, /** * Turns on [Manual column move](https://docs.handsontable.com/demo-moving-rows-and-columns.html), if set to a boolean or define initial column order (as an array of column indexes). * * @memberof Options# * @type {boolean|number[]} * @default undefined * * @example * ```js * // as a boolean to enable column move * manualColumnMove: true, * * // as a array with initial order * // (move column index at 0 to 1 and move column index at 1 to 4) * manualColumnMove: [1, 4], * ``` */ manualColumnMove: void 0, /** * @description * Turns on [Manual column resize](https://docs.handsontable.com/demo-resizing.html), if set to a boolean or define initial column resized widths (an an array of widths). * * @memberof Options# * @type {boolean|number[]} * @default undefined * * @example * ```js * // as a boolean to enable column resize * manualColumnResize: true, * * // as a array with initial widths * // (column at 0 index has 40px and column at 1 index has 50px) * manualColumnResize: [40, 50], * ``` */ manualColumnResize: void 0, /** * @description * Turns on [Manual row move](https://docs.handsontable.com/demo-moving-rows-and-columns.html), if set to a boolean or define initial row order (as an array of row indexes). 
* * @memberof Options# * @type {boolean|number[]} * @default undefined * * @example * ```js * // as a boolean * manualRowMove: true, * * // as a array with initial order * // (move row index at 0 to 1 and move row index at 1 to 4) * manualRowMove: [1, 4], * ``` */ manualRowMove: void 0, /** * @description * Turns on [Manual row resize](https://docs.handsontable.com/demo-resizing.html), if set to a boolean or define initial row resized heights (as an array of heights). * * @memberof Options# * @type {boolean|number[]} * @default undefined * * @example * ```js * // as a boolean to enable row resize * manualRowResize: true, * * // as an array to set initial heights * // (row at 0 index has 40px and row at 1 index has 50px) * manualRowResize: [40, 50], * ``` */ manualRowResize: void 0, /** * @description * If set to `true`, it enables a possibility to merge cells. If set to an array of objects, it merges the cells provided * in the objects (see the example below). More information on [the demo page](https://docs.handsontable.com/demo-merge-cells.html). * * @memberof Options# * @type {boolean|object[]} * @default false * * @example * ```js * // enables the mergeCells plugin * margeCells: true, * * // declares a list of merged sections * mergeCells: [ * // rowspan and colspan properties declare the width and height of a merged section in cells * {row: 1, col: 1, rowspan: 3, colspan: 3}, * {row: 3, col: 4, rowspan: 2, colspan: 2}, * {row: 5, col: 6, rowspan: 3, colspan: 3} * ], * ``` */ mergeCells: false, /** * @description * Turns on [Multi-column sorting](https://docs.handsontable.com/demo-multicolumn-sorting.html). Can be either a boolean (`true` / `false`) or an object with a declared sorting options: * * `initialConfig` - Array containing objects, every with predefined keys: * * `column` - sorted column * * `sortOrder` - order in which column will be sorted * * `'asc'` = ascending * * `'desc'` = descending * * `indicator` - display status for sorting order indicator (an arrow icon in the column header, specifying the sorting order). * * `true` = show sort indicator for sorted columns * * `false` = don't show sort indicator for sorted columns * * `headerAction` - allow to click on the headers to sort * * `true` = turn on possibility to click on the headers to sort * * `false` = turn off possibility to click on the headers to sort * * `sortEmptyCells` - how empty values should be handled * * `true` = the table sorts empty cells * * `false` = the table moves all empty cells to the end of the table * * `compareFunctionFactory` - curry function returning compare function; compare function should work in the same way as function which is handled by native `Array.sort` method); please take a look at below examples for more information. 
* * @memberof Options# * @type {boolean|object} * @default undefined * * @example * ```js * // as boolean * multiColumnSorting: true * * // as an object with initial sort config (sort ascending for column at index 1 and then sort descending for column at index 0) * multiColumnSorting: { * initialConfig: [{ * column: 1, * sortOrder: 'asc' * }, { * column: 0, * sortOrder: 'desc' * }] * } * * // as an object which define specific sorting options for all columns * multiColumnSorting: { * sortEmptyCells: true, // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table * indicator: true, // true = shows indicator for all columns, false = don't show indicator for columns * headerAction: false, // true = allow to click on the headers to sort, false = turn off possibility to click on the headers to sort * compareFunctionFactory: function(sortOrder, columnMeta) { * return function(value, nextValue) { * // Some value comparisons which will return -1, 0 or 1... * } * } * }``` */ multiColumnSorting: void 0, /** * @description * Number of rows to be rendered outside of the visible part of the table. By default, it's set to `'auto'`, which * makes Handsontable to attempt to calculate the best offset performance-wise. * * You may test out different values to find the best one that works for your specific implementation. * * @memberof Options# * @type {number|string} * @default 'auto' * * @example * ```js * viewportRowRenderingOffset: 70, * ``` */ viewportRowRenderingOffset: 'auto', /** * @description * Number of columns to be rendered outside of the visible part of the table. By default, it's set to `'auto'`, which * makes Handsontable try calculating the best offset performance-wise. * * You may experiment with the value to find the one that works best for your specific implementation. * * @memberof Options# * @type {number|string} * @default 'auto' * * @example * ```js * viewportColumnRenderingOffset: 70, * ``` */ viewportColumnRenderingOffset: 'auto', /** * @description * A function, regular expression or a string, which will be used in the process of cell validation. If a function is * used, be sure to execute the callback argument with either `true` (`callback(true)`) if the validation passed * or with `false` (`callback(false)`), if the validation failed. * * __Note__, that `this` in the function points to the `cellProperties` object. * * If a string is provided, it may be one of the following predefined values: * * `autocomplete`, * * `date`, * * `numeric`, * * `time`. * * Or you can [register](https://docs.handsontable.com/demo-data-validation.html) the validator function under specified name and use its name as an alias in your * configuration. * * See more [in the demo](https://docs.handsontable.com/demo-data-validation.html). * * @memberof Options# * @type {Function|RegExp|string} * @default undefined * * @example * ```js * columns: [ * { * // as a function * validator: function(value, callback) { * ... * } * }, * { * // regular expression * validator: /^[0-9]$/ * }, * { * // as a string * validator: 'numeric' * } * ], * ``` */ validator: void 0, /** * @description * Disables visual cells selection. * * Possible values: * * `true` - Disables any type of visual selection (current, header and area selection), * * `false` - Enables any type of visual selection. This is default value. * * `'current'` - Disables the selection of a currently selected cell, the area selection is still present. 
* * `'area'` - Disables the area selection, the currently selected cell selection is still present. * * `'header'` - Disables the headers selection, the currently selected cell selection is still present. * * @memberof Options# * @type {boolean|string|string[]} * @default false * * @example * ```js * // as a boolean * disableVisualSelection: true, * * // as a string ('current', 'area' or 'header') * disableVisualSelection: 'current', * * // as an array * disableVisualSelection: ['current', 'area'], * ``` */ disableVisualSelection: false, /** * Disables or enables {@link ManualColumnFreeze} plugin. * * @memberof Options# * @type {boolean} * @default undefined * * @example * ```js * // enable fixed columns * manualColumnFreeze: true, * ``` */ manualColumnFreeze: void 0, /** * Defines whether Handsontable should trim the whitespace at the beginning and the end of the cell contents. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * columns: [ * { * // don't remove whitespace * trimWhitespace: false * } * ] * ``` */ trimWhitespace: true, /** * Defines data source for Autocomplete or Dropdown cell types. * * @memberof Options# * @type {Array|Function} * @default undefined * * @example * ```js * // source as a array * columns: [{ * type: 'autocomplete', * source: ['A', 'B', 'C', 'D'] * }], * * // source as a function * columns: [{ * type: 'autocomplete', * source: function(query, callback) { * fetch('https://example.com/query?q=' + query, function(response) { * callback(response.items); * }) * } * }], * ``` */ source: void 0, /** * @description * Defines the column header name. * * @memberof Options# * @type {string} * @default undefined * * @example * ```js * // set header names for every column * columns: [ * { * title: 'First name', * type: 'text', * }, * { * title: 'Last name', * type: 'text', * } * ], * ``` */ title: void 0, /** * Data template for `'checkbox'` type when checkbox is checked. * * @memberof Options# * @type {boolean|string|number} * @default true * * @example * ```js * checkedTemplate: 'good' * * // if a checkbox-typed cell is checked, then getDataAtCell(x, y), * // where x and y are the coordinates of the cell will return 'good'. * ``` */ checkedTemplate: void 0, /** * Data template for `'checkbox'` type when checkbox is unchecked. * * @memberof Options# * @type {boolean|string|number} * @default false * * @example * ```js * uncheckedTemplate: 'bad' * * // if a checkbox-typed cell is not checked, then getDataAtCell(x,y), * // where x and y are the coordinates of the cell will return 'bad'. * ``` */ uncheckedTemplate: void 0, /** * @description * Object which describes if renderer should create checkbox element with label element as a parent. * * __Note__, this option only works for [checkbox-typed](https://docs.handsontable.com/demo-checkbox.html) cells. * * By default the [checkbox](https://docs.handsontable.com/demo-checkbox.html) renderer renders the checkbox without a label. * * Possible object properties: * * `property` - Defines the property name of the data object, which will to be used as a label. * (eg. `label: {property: 'name.last'}`). This option works only if data was passed as an array of objects. * * `position` - String which describes where to place the label text (before or after checkbox element). * Valid values are `'before'` and '`after`' (defaults to `'after'`). * * `value` - String or a Function which will be used as label text. 
* * @memberof Options# * @type {object} * @default undefined * * @example * ```js * columns: [{ * type: 'checkbox', * // add "My label:" after the checkbox * label: {position: 'after', value: 'My label: '} * }], * ``` */ label: void 0, /** * Display format for numeric typed renderers. * * __Note__, this option only works for [numeric-typed](https://docs.handsontable.com/demo-numeric.html) cells. * * Format is described by two properties: * * `pattern` - Handled by `numbro` for purpose of formatting numbers to desired pattern. List of supported patterns can be found [here](http://numbrojs.com/format.html#numbers). * * `culture` - Handled by `numbro` for purpose of formatting currencies. Examples showing how it works can be found [here](http://numbrojs.com/format.html#currency). List of supported cultures can be found [here](http://numbrojs.com/languages.html#supported-languages). * * __Note:__ Please keep in mind that this option is used only to format the displayed output! It has no effect on the input data provided for the cell. The numeric data can be entered to the table only as floats (separated by a dot or a comma) or integers, and are stored in the source dataset as JavaScript numbers. * * Handsontable uses [numbro](http://numbrojs.com/) as a main library for numbers formatting. * * @memberof Options# * @since 0.35.0 * @type {object} * @default undefined * * @example * ```js * columns: [ * { * type: 'numeric', * // set desired format pattern and * numericFormat: { * pattern: '0,00', * culture: 'en-US' * } * } * ], * ``` */ numericFormat: void 0, /** * Language for Handsontable translation. Possible language codes are [listed here](https://docs.handsontable.com/tutorial-internationalization.html#available-languages). * * @memberof Options# * @type {string} * @default 'en-US' * * @example * ```js * // set Polish language * language: 'pl-PL', * ``` */ language: 'en-US', /** * Data source for [select-typed](https://docs.handsontable.com/demo-select.html) cells. * * __Note__, this option only works for [select-typed](https://docs.handsontable.com/demo-select.html) cells. * * @memberof Options# * @type {string[]} * @default undefined * * @example * ```js * columns: [ * { * editor: 'select', * // add three select options to choose from * selectOptions: ['A', 'B', 'C'], * } * ], * ``` */ selectOptions: void 0, /** * Enables or disables the {@link AutoColumnSize} plugin. Default value is `undefined`, which has the same effect as `true`, * meaning, the `syncLimit` is set to 50. * Disabling this plugin can increase performance, as no size-related calculations would be done. * * Column width calculations are divided into sync and async part. Each of those parts has their own advantages and * disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous * operations don't block the browser UI. * * To configure the sync/async distribution, you can pass an absolute value (number of columns) or a percentage value. * * You can also use the `useHeaders` option to take the column headers width into calculation. * * @memberof Options# * @type {object|boolean} * @default {syncLimit: 50} * * @example * ```js * // as a number (300 columns in sync, rest async) * autoColumnSize: {syncLimit: 300}, * * // as a string (percent) * autoColumnSize: {syncLimit: '40%'}, * * // use headers width while calculating the column width * autoColumnSize: {useHeaders: true}, * ``` */ autoColumnSize: void 0, /** * Enables or disables {@link AutoRowSize} plugin. 
Default value is `undefined`, which has the same effect as `false` * (disabled). Enabling this plugin can decrease performance, as size-related calculations would be performed. * * __Note:__ the default `syncLimit` value is set to 500 when the plugin is manually enabled by declaring it as: `autoRowSize: true`. * * Row height calculations are divided into sync and async stages. Each of these stages has their own advantages and * disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous * operations don't block the browser UI. * * To configure the sync/async distribution, you can pass an absolute value (number of columns) or a percentage value. * * @memberof Options# * @type {object|boolean} * @default undefined * * @example * ```js * // as a number (300 columns in sync, rest async) * autoRowSize: {syncLimit: 300}, * * // as a string (percent) * autoRowSize: {syncLimit: '40%'}, * ``` */ autoRowSize: void 0, /** * Date validation format. * * __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells. * * @memberof Options# * @type {string} * @default 'DD/MM/YYYY' * * @example * ```js * columns: [{ * type: 'date', * // localise date format * dateFormat: 'MM/DD/YYYY' * }], * ``` */ dateFormat: 'DD/MM/YYYY', /** * If `true` then dates will be automatically formatted to match the desired format. * * __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * columns: [{ * type: 'date', * dateFormat: 'YYYY-MM-DD', * // force selected date format * correctFormat: true * }], * ``` */ correctFormat: false, /** * Definition of default value which will fill the empty cells. * * __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells. * * @memberof Options# * @type {string} * @default undefined * * @example * ```js * columns: [ * { * type: 'date', * // always set this date for empty cells * defaultDate: '2015-02-02' * } * ], * ``` */ defaultDate: void 0, /** * If set to `true`, the value entered into the cell must match (case-sensitive) the autocomplete source. * Otherwise, cell won't pass the validation. When filtering the autocomplete source list, the editor will * be working in case-insensitive mode. * * __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells. * * @memberof Options# * @type {boolean} * @default undefined * * @example * ```js * columns: [{ * type: 'autocomplete', * source: ['A', 'B', 'C'], * // force selected value to match the source list * strict: true * }], * ``` */ strict: void 0, /** * If set to `true`, data defined in `source` of the autocomplete or dropdown cell will be treated as HTML. * * __Warning:__ Enabling this option can cause serious XSS vulnerabilities. * * __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * columns: [{ * type: 'autocomplete', * // use HTML in the source list * allowHtml: true, * source: ['<strong>foo</strong>', '<strong>bar</strong>'] * }], * ``` */ allowHtml: false, /** * If typed `true` then virtual rendering mechanism for handsontable will be disabled. 
* * @memberof Options# * @type {boolean} * @default undefined * * @example * ```js * // disable virtual rows rendering * renderAllRows: true, * ``` */ renderAllRows: void 0, /** * Prevents table to overlap outside the parent element. If `'horizontal'` option is chosen then table will show * a horizontal scrollbar if parent's width is narrower then table's width. * * Possible values: * * `false` - Disables functionality. * * `horizontal` - Prevents horizontal overflow table. * * `vertical` - Prevents vertical overflow table. * * @memberof Options# * @type {string|boolean} * @default false * * @example * ```js * preventOverflow: 'horizontal', * ``` */ preventOverflow: false, /** * Prevents wheel event on overlays for doing default action. * * @memberof Options# * @private * @type {boolean} * @default false * * @example * ```js * preventWheel: false, * ``` */ preventWheel: false, /** * @description * Enables the functionality of the {@link BindRowsWithHeaders} plugin which allows binding the table rows with their headers. * If the plugin is enabled, the table row headers will "stick" to the rows, when they are hidden/moved. Basically, * if at the initialization row 0 has a header titled "A", it will have it no matter what you do with the table. * * @memberof Options# * @type {boolean|string} * @default undefined * * @example * ```js * // keep row data and row headers in sync * bindRowsWithHeaders: true * ``` */ bindRowsWithHeaders: void 0, /** * @description * The {@link CollapsibleColumns} plugin allows collapsing of columns, covered by a header with the `colspan` property * defined. * * Clicking the "collapse/expand" button collapses (or expands) all "child" headers except the first one. * * Setting the `collapsibleColumns` property to `true` will display a "collapse/expand" button in every * header with a defined colspan` property. * * To limit this functionality to a smaller group of headers, define the `collapsibleColumns` property * as an array of objects, as in the example below. * * @memberof Options# * @type {boolean|object[]} * @default undefined * * @example * ```js * // enable collapsing for all headers * collapsibleColumns: true, * * // or * // enable collapsing for selected headers * collapsibleColumns: [ * {row: -4, col: 1, collapsible: true}, * {row: -3, col: 5, collapsible: true} * ], * ``` */ collapsibleColumns: void 0, /** * @description * Allows making pre-defined calculations on the cell values and display the results within Handsontable. * * Possible types: * * `'sum'` * * `'min'` * * `'max'` * * `'count'` * * `'average'` * * `'custom'` - add `customFunction`. * * [See the demo for more information](https://docs.handsontable.com/demo-summary-calculations.html). * * @memberof Options# * @type {object[]|Function} * @default undefined * * @example * ``` * columnSummary: [ * { * destinationRow: 4, * destinationColumn: 1, * forceNumeric: true, * reversedRowCoords: true, * suppressDataTypeErrors: false, * readOnly: true, * roundFloat: false, * type: 'custom', * customFunction: function(endpoint) { * return 100; * } * } * ], * ``` */ columnSummary: void 0, /** * This plugin allows adding a configurable dropdown menu to the table's column headers. The dropdown menu acts like * the {@link Options#contextMenu}, but is triggered by clicking the button in the header. 
* * @memberof Options# * @type {boolean|object|string[]} * @default undefined * * @example * ```js * // enable dropdown menu * dropdownMenu: true, * * // or * // enable and configure dropdown menu options * dropdownMenu: ['remove_col', '---------', 'make_read_only', 'alignment'] * ``` */ dropdownMenu: void 0, /** * The {@link Filters} plugin allows filtering the table data either by the built-in component or with the API. * * @memberof Options# * @type {boolean} * @default undefined * * @example * ```js * // enable filters * filters: true, * ``` */ filters: void 0, /** * The {@link Formulas} plugin allows Handsontable to process formula expressions defined in the provided data. * * @memberof Options# * @type {boolean|object} * @default undefined * * @example * ```js * // enable formulas plugin * formulas: true, * * // or as an object with custom variables to be used in formula expressions * formulas: { * variables: { * FOO: 64, * BAR: 'baz', * } * }, * ``` */ formulas: void 0, /** * @description * Allows adding a tooltip to the table headers. * * Available options: * * the `rows` property defines if tooltips should be added to row headers, * * the `columns` property defines if tooltips should be added to column headers, * * the `onlyTrimmed` property defines if tooltips should be added only to headers, which content is trimmed by the header itself (the content being wider then the header). * * @memberof Options# * @type {boolean|object} * @default undefined * @deprecated This plugin is deprecated and will be removed in the next major release. * * @example * ```js * // enable tooltips for all headers * headerTooltips: true, * * // or * headerTooltips: { * rows: false, * columns: true, * onlyTrimmed: true * } * ``` */ headerTooltips: void 0, /** * The {@link HiddenColumns} plugin allows hiding of certain columns. You can pass additional configuration with an * object notation. Options that are then available are: * * `columns` - an array of rows that should be hidden on plugin initialization * * `indicators` - enables small ui markers to indicate where are hidden columns. * * @memberof Options# * @type {boolean|object} * @default undefined * * @example * ```js * // enable column hiding * hiddenColumns: true, * * // or * hiddenColumns: { * // set columns that are hidden by default * columns: [5, 10, 15], * // show where are hidden columns * indicators: true * } * ``` */ hiddenColumns: void 0, /** * The {@link HiddenRows} plugin allows hiding of certain rows. You can pass additional configuration with an * object notation. Options that are then available are: * * `rows` - an array of rows that should be hidden on plugin initialization * * `indicators` - enables small ui markers to indicate where are hidden columns. * * @memberof Options# * @type {boolean|object} * @default undefined * * @example * ```js * // enable row hiding * hiddenRows: true, * * // or * hiddenRows: { * // set rows that are hidden by default * rows: [5, 10, 15], * // show where are hidden rows * indicators: true * } * ``` */ hiddenRows: void 0, /** * @description * Allows creating a nested header structure, using the HTML's colspan attribute. * * @memberof Options# * @type {Array[]} * @default undefined * * @example * ``` * nestedHeaders: [ * ['A', {label: 'B', colspan: 8}, 'C'], * ['D', {label: 'E', colspan: 4}, {label: 'F', colspan: 4}, 'G'], * ['H', 'I', 'J', 'K', 'L', 'M', 'N', 'R', 'S', 'T'] * ], * ``` */ nestedHeaders: void 0, /** * @description * Plugin allowing hiding of certain rows. 
* * @memberof Options# * @type {boolean|number[]} * @default undefined * * @example * ```js * // enable plugin * trimRows: true, * * // or * // trim selected rows on table initialization * trimRows: [5, 10, 15], * ``` */ trimRows: void 0, /** * @description * Allows setting a custom width of the row headers. You can provide a number or an array of widths, if many row * header levels are defined. * * @memberof Options# * @type {number|number[]} * @default undefined * * @example * ```js * // set width for all row headers * rowHeaderWidth: 25, * * // or * // set width for selected headers only * rowHeaderWidth: [25, 30, 55], * ``` */ rowHeaderWidth: void 0, /** * @description * Allows setting a custom height of the column headers. You can provide a number or an array of heights, if many * column header levels are defined. * * @memberof Options# * @type {number|number[]} * @default undefined * * @example * ```js * // set shared height for all headers * columnHeaderHeight: 35, * * // or * // set height for each header individually * columnHeaderHeight: [35, 20, 55], * * // or * // skipped headers will fallback to default value * columnHeaderHeight: [35, undefined, 55], * ``` */ columnHeaderHeight: void 0, /** * @description * Enables the {@link ObserveChanges} plugin switches table into one-way data binding where changes are applied into * data source (from outside table) will be automatically reflected in the table. * * For every data change [afterChangesObserved](Hooks.html#event:afterChangesObserved) hook will be fired. * * @memberof Options# * @type {boolean} * @default undefined * @deprecated This plugin is deprecated and will be removed in the next major release. * * @example * ```js * observeChanges: true, * ``` */ observeChanges: void 0, /** * If defined as `true`, the Autocomplete's suggestion list would be sorted by relevance (the closer to the left the * match is, the higher the suggestion). * * __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * columns: [ * { * type: 'autocomplete', * source: [ ... ], * // keep options order as they were defined * sortByRelevance: false * } * ], * ``` */ sortByRelevance: true, /** * If defined as `true`, when the user types into the input area the Autocomplete's suggestion list is updated to only * include those choices starting with what has been typed; if defined as `false` all suggestions remain shown, with * those matching what has been typed marked in bold. * * __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells. * * @memberof Options# * @type {boolean} * @default true * * @example * ```js * columns: [ * { * type: 'autocomplete', * source: [ ... ], * // don't hide options that don't match search query * filter: false * } * ], * ``` */ filter: true, /** * If defined as `true`, filtering in the Autocomplete Editor will be case-sensitive. * * __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells. * * @memberof Options# * @type {boolean} * @default false * * @example * ```js * columns: [ * { * type: 'autocomplete', * source: [ ... ], * // match case while searching autocomplete options * filteringCaseSensitive: true * } * ], * ``` */ filteringCaseSensitive: false, /** * @description * Disables or enables the drag to scroll functionality. 
* * @memberof Options# * @type {boolean} * @default true * * @example * ```js * // don't scroll the viewport when selection gets to the viewport edge * dragToScroll: false, * ``` */ dragToScroll: true, /** * @description * Disable or enable the nested rows functionality - displaying nested structures in a two-dimensional data table. * * See [quick setup of the Nested rows](https://handsontable.com/docs/demo-nested-rows.html). * @example * ```js * nestedRows: true, * ``` * * @memberof Options# * @type {boolean} * @default false */ nestedRows: void 0, }; };
1
17846
We could go further and directly give tips on the `columns` property for someone (like me) who would like to specify the width of some columns and let the others be autosized.
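A minimal sketch of the kind of tip the comment asks for (illustrative only — `container` and `myData` are hypothetical, and it assumes that columns with an explicit `width` keep it while the remaining columns are sized by the AutoColumnSize plugin):

```js
// Hypothetical usage: fix the width of one column, let the others be autosized.
const hot = new Handsontable(container, {
  data: myData,              // hypothetical data source
  autoColumnSize: true,      // calculate widths for columns without an explicit width
  columns: [
    { data: 'id', width: 60 },   // fixed width
    { data: 'name' },            // autosized
    { data: 'description' }      // autosized
  ]
});
```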
handsontable-handsontable
js
@@ -1058,8 +1058,17 @@ void VkRenderFramework::InitRenderTarget(uint32_t targets, VkImageView *dsBindin VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; // Must include dep_by_region bit when src & dst both include framebuffer-space stages subpass_dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; - rp_info.dependencyCount = 1; - rp_info.pDependencies = &subpass_dep; + } + + if (m_additionalSubpassDependencies.size()) { + m_renderPass_dependencies.reserve(m_additionalSubpassDependencies.size() + m_renderPass_dependencies.size()); + m_renderPass_dependencies.insert(m_renderPass_dependencies.end(), m_additionalSubpassDependencies.begin(), + m_additionalSubpassDependencies.end()); + } + + if (m_renderPass_dependencies.size()) { + rp_info.dependencyCount = static_cast<uint32_t>(m_renderPass_dependencies.size()); + rp_info.pDependencies = m_renderPass_dependencies.data(); } else { rp_info.dependencyCount = 0; rp_info.pDependencies = nullptr;
1
/* * Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (c) 2015-2021 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> * Author: Tony Barbour <tony@LunarG.com> * Author: Dave Houlton <daveh@lunarg.com> */ #include "vkrenderframework.h" #include <algorithm> #include <cassert> #include <cstring> #include <utility> #include <vector> #include "vk_format_utils.h" #include "vk_extension_helper.h" using std::string; using std::strncmp; using std::vector; template <typename C, typename F> typename C::iterator RemoveIf(C &container, F &&fn) { return container.erase(std::remove_if(container.begin(), container.end(), std::forward<F>(fn)), container.end()); } ErrorMonitor::ErrorMonitor(Behavior behavior) : behavior_(behavior) { test_platform_thread_create_mutex(&mutex_); MonitorReset(); if (behavior_ == Behavior::DefaultSuccess) { ExpectSuccess(kErrorBit); } } ErrorMonitor::~ErrorMonitor() NOEXCEPT { test_platform_thread_delete_mutex(&mutex_); } void ErrorMonitor::MonitorReset() { message_flags_ = 0; bailout_ = NULL; message_found_ = VK_FALSE; failure_message_strings_.clear(); desired_message_strings_.clear(); ignore_message_strings_.clear(); allowed_message_strings_.clear(); other_messages_.clear(); } void ErrorMonitor::Reset() { test_platform_thread_lock_mutex(&mutex_); MonitorReset(); test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::SetDesiredFailureMsg(const VkFlags msgFlags, const string msg) { SetDesiredFailureMsg(msgFlags, msg.c_str()); } void ErrorMonitor::SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) { if (NeedCheckSuccess()) { VerifyNotFound(); } test_platform_thread_lock_mutex(&mutex_); desired_message_strings_.insert(msgString); message_flags_ |= msgFlags; test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::SetAllowedFailureMsg(const char *const msg) { test_platform_thread_lock_mutex(&mutex_); allowed_message_strings_.emplace_back(msg); test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::SetUnexpectedError(const char *const msg) { if (NeedCheckSuccess()) { VerifyNotFound(); } test_platform_thread_lock_mutex(&mutex_); ignore_message_strings_.emplace_back(msg); test_platform_thread_unlock_mutex(&mutex_); } VkBool32 ErrorMonitor::CheckForDesiredMsg(const char *const msgString) { VkBool32 result = VK_FALSE; test_platform_thread_lock_mutex(&mutex_); if (bailout_ != nullptr) { *bailout_ = true; } string errorString(msgString); bool found_expected = false; if (!IgnoreMessage(errorString)) { for (auto desired_msg_it = desired_message_strings_.begin(); desired_msg_it != desired_message_strings_.end(); ++desired_msg_it) { if ((*desired_msg_it).length() == 0) { // An empty desired_msg string "" indicates a positive test - not expecting an error. // Return true to avoid calling layers/driver with this error. // And don't erase the "" string, so it remains if another error is found. 
result = VK_TRUE; found_expected = true; message_found_ = true; failure_message_strings_.insert(errorString); } else if (errorString.find(*desired_msg_it) != string::npos) { found_expected = true; failure_message_strings_.insert(errorString); message_found_ = true; result = VK_TRUE; // Remove a maximum of one failure message from the set // Multiset mutation is acceptable because `break` causes flow of control to exit the for loop desired_message_strings_.erase(desired_msg_it); break; } } if (!found_expected && allowed_message_strings_.size()) { for (auto allowed_msg_it = allowed_message_strings_.begin(); allowed_msg_it != allowed_message_strings_.end(); ++allowed_msg_it) { if (errorString.find(*allowed_msg_it) != string::npos) { found_expected = true; break; } } } if (!found_expected) { printf("Unexpected: %s\n", msgString); other_messages_.push_back(errorString); } } test_platform_thread_unlock_mutex(&mutex_); return result; } vector<string> ErrorMonitor::GetOtherFailureMsgs() const { return other_messages_; } VkDebugReportFlagsEXT ErrorMonitor::GetMessageFlags() { return message_flags_; } bool ErrorMonitor::AnyDesiredMsgFound() const { return message_found_; } bool ErrorMonitor::AllDesiredMsgsFound() const { return desired_message_strings_.empty(); } void ErrorMonitor::SetError(const char *const errorString) { test_platform_thread_lock_mutex(&mutex_); message_found_ = true; failure_message_strings_.insert(errorString); test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::SetBailout(bool *bailout) { test_platform_thread_lock_mutex(&mutex_); bailout_ = bailout; test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::DumpFailureMsgs() const { vector<string> otherMsgs = GetOtherFailureMsgs(); if (otherMsgs.size()) { std::cout << "Other error messages logged for this test were:" << std::endl; for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) { std::cout << " " << *iter << std::endl; } } } void ErrorMonitor::ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask) { // Match ANY message matching specified type test_platform_thread_lock_mutex(&mutex_); desired_message_strings_.insert(""); message_flags_ = message_flag_mask; test_platform_thread_unlock_mutex(&mutex_); } void ErrorMonitor::VerifyFound() { test_platform_thread_lock_mutex(&mutex_); // Not receiving expected message(s) is a failure. /Before/ throwing, dump any other messages if (!AllDesiredMsgsFound()) { DumpFailureMsgs(); for (const auto &desired_msg : desired_message_strings_) { ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'"; } } else if (GetOtherFailureMsgs().size() > 0) { // Fail test case for any unexpected errors #if defined(ANDROID) // This will get unexpected errors into the adb log for (auto msg : other_messages_) { __android_log_print(ANDROID_LOG_INFO, "VulkanLayerValidationTests", "[ UNEXPECTED_ERR ] '%s'", msg.c_str()); } #else ADD_FAILURE() << "Received unexpected error(s)."; #endif } MonitorReset(); test_platform_thread_unlock_mutex(&mutex_); if (behavior_ == Behavior::DefaultSuccess) { ExpectSuccess(); } } void ErrorMonitor::VerifyNotFound() { test_platform_thread_lock_mutex(&mutex_); // ExpectSuccess() configured us to match anything. Any error is a failure. 
if (AnyDesiredMsgFound()) { DumpFailureMsgs(); for (const auto &msg : failure_message_strings_) { ADD_FAILURE() << "Expected to succeed but got error: " << msg; } } else if (GetOtherFailureMsgs().size() > 0) { // Fail test case for any unexpected errors #if defined(ANDROID) // This will get unexpected errors into the adb log for (auto msg : other_messages_) { __android_log_print(ANDROID_LOG_INFO, "VulkanLayerValidationTests", "[ UNEXPECTED_ERR ] '%s'", msg.c_str()); } #else ADD_FAILURE() << "Received unexpected error(s)."; #endif } MonitorReset(); test_platform_thread_unlock_mutex(&mutex_); } bool ErrorMonitor::IgnoreMessage(string const &msg) const { if (ignore_message_strings_.empty()) { return false; } return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](string const &str) { return msg.find(str) != string::npos; }) != ignore_message_strings_.end(); } void DebugReporter::Create(VkInstance instance) NOEXCEPT { assert(instance); assert(!debug_obj_); auto DebugCreate = reinterpret_cast<DebugCreateFnType>(vk::GetInstanceProcAddr(instance, debug_create_fn_name_)); if (!DebugCreate) return; const VkResult err = DebugCreate(instance, &debug_create_info_, nullptr, &debug_obj_); if (err) debug_obj_ = VK_NULL_HANDLE; } void DebugReporter::Destroy(VkInstance instance) NOEXCEPT { assert(instance); assert(debug_obj_); // valid to call with null object, but probably bug auto DebugDestroy = reinterpret_cast<DebugDestroyFnType>(vk::GetInstanceProcAddr(instance, debug_destroy_fn_name_)); assert(DebugDestroy); DebugDestroy(instance, debug_obj_, nullptr); debug_obj_ = VK_NULL_HANDLE; } #ifdef VK_USE_PLATFORM_ANDROID_KHR VKAPI_ATTR VkBool32 VKAPI_CALL DebugReporter::DebugCallback(VkDebugReportFlagsEXT message_flags, VkDebugReportObjectTypeEXT, uint64_t, size_t, int32_t, const char *, const char *message, void *user_data) { #else VKAPI_ATTR VkBool32 VKAPI_CALL DebugReporter::DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, VkDebugUtilsMessageTypeFlagsEXT message_types, const VkDebugUtilsMessengerCallbackDataEXT *callback_data, void *user_data) { const auto message_flags = DebugAnnotFlagsToReportFlags(message_severity, message_types); const char *message = callback_data->pMessage; #endif ErrorMonitor *errMonitor = (ErrorMonitor *)user_data; if (message_flags & errMonitor->GetMessageFlags()) { return errMonitor->CheckForDesiredMsg(message); } return VK_FALSE; } VkRenderFramework::VkRenderFramework() : instance_(NULL), m_device(NULL), m_commandPool(VK_NULL_HANDLE), m_commandBuffer(NULL), m_renderPass(VK_NULL_HANDLE), m_framebuffer(VK_NULL_HANDLE), m_surface(VK_NULL_HANDLE), m_swapchain(VK_NULL_HANDLE), m_addRenderPassSelfDependency(false), m_width(256.0), // default window width m_height(256.0), // default window height m_render_target_fmt(VK_FORMAT_R8G8B8A8_UNORM), m_depth_stencil_fmt(VK_FORMAT_UNDEFINED), m_clear_via_load_op(true), m_depth_clear_color(1.0), m_stencil_clear_color(0), m_depthStencil(NULL) { m_framebuffer_info = LvlInitStruct<VkFramebufferCreateInfo>(); m_renderPass_info = LvlInitStruct<VkRenderPassCreateInfo>(); m_renderPassBeginInfo = LvlInitStruct<VkRenderPassBeginInfo>(); // clear the back buffer to dark grey m_clear_color.float32[0] = 0.25f; m_clear_color.float32[1] = 0.25f; m_clear_color.float32[2] = 0.25f; m_clear_color.float32[3] = 0.0f; } VkRenderFramework::~VkRenderFramework() { ShutdownFramework(); } VkPhysicalDevice VkRenderFramework::gpu() { EXPECT_NE((VkInstance)0, instance_); // Invalid to request gpu before instance 
exists return gpu_; } VkPhysicalDeviceProperties VkRenderFramework::physDevProps() { EXPECT_NE((VkPhysicalDevice)0, gpu_); // Invalid to request physical device properties before gpu return physDevProps_; } // Return true if layer name is found and spec+implementation values are >= requested values bool VkRenderFramework::InstanceLayerSupported(const char *const layer_name, const uint32_t spec_version, const uint32_t impl_version) { const auto layers = vk_testing::GetGlobalLayers(); for (const auto &layer : layers) { if (0 == strncmp(layer_name, layer.layerName, VK_MAX_EXTENSION_NAME_SIZE)) { return layer.specVersion >= spec_version && layer.implementationVersion >= impl_version; } } return false; } // Return true if extension name is found and spec value is >= requested spec value // WARNING: for simplicity, does not cover layers' extensions bool VkRenderFramework::InstanceExtensionSupported(const char *const extension_name, const uint32_t spec_version) { // WARNING: assume debug and validation feature extensions are always supported, which are usually provided by layers if (0 == strncmp(extension_name, VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) return true; if (0 == strncmp(extension_name, VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) return true; if (0 == strncmp(extension_name, VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) return true; const auto extensions = vk_testing::GetGlobalExtensions(); const auto IsTheQueriedExtension = [extension_name, spec_version](const VkExtensionProperties &extension) { return strncmp(extension_name, extension.extensionName, VK_MAX_EXTENSION_NAME_SIZE) == 0 && extension.specVersion >= spec_version; }; return std::any_of(extensions.begin(), extensions.end(), IsTheQueriedExtension); } // Enable device profile as last layer on stack overriding devsim if there, or return if not available bool VkRenderFramework::EnableDeviceProfileLayer() { if (InstanceLayerSupported("VK_LAYER_LUNARG_device_profile_api")) { if (VkTestFramework::m_devsim_layer) { assert(0 == strncmp(instance_layers_.back(), "VK_LAYER_LUNARG_device_simulation", VK_MAX_EXTENSION_NAME_SIZE)); instance_layers_.back() = "VK_LAYER_LUNARG_device_profile_api"; } else { instance_layers_.push_back("VK_LAYER_LUNARG_device_profile_api"); } } else { printf(" Did not find VK_LAYER_LUNARG_device_profile_api layer; skipped.\n"); return false; } return true; } // Return true if instance exists and extension name is in the list bool VkRenderFramework::InstanceExtensionEnabled(const char *ext_name) { if (!instance_) return false; return std::any_of(instance_extensions_.begin(), instance_extensions_.end(), [ext_name](const char *e) { return 0 == strncmp(ext_name, e, VK_MAX_EXTENSION_NAME_SIZE); }); } // Return true if extension name is found and spec value is >= requested spec value bool VkRenderFramework::DeviceExtensionSupported(const char *extension_name, const uint32_t spec_version) const { if (!instance_ || !gpu_) { EXPECT_NE((VkInstance)0, instance_); // Complain, not cool without an instance EXPECT_NE((VkPhysicalDevice)0, gpu_); return false; } const vk_testing::PhysicalDevice device_obj(gpu_); const auto enabled_layers = instance_layers_; // assumes instance_layers_ contains enabled layers auto extensions = device_obj.extensions(); for (const auto &layer : enabled_layers) { const auto layer_extensions = device_obj.extensions(layer); extensions.insert(extensions.end(), layer_extensions.begin(), layer_extensions.end()); } const auto 
IsTheQueriedExtension = [extension_name, spec_version](const VkExtensionProperties &extension) { return strncmp(extension_name, extension.extensionName, VK_MAX_EXTENSION_NAME_SIZE) == 0 && extension.specVersion >= spec_version; }; return std::any_of(extensions.begin(), extensions.end(), IsTheQueriedExtension); } // Return true if device is created and extension name is found in the list bool VkRenderFramework::DeviceExtensionEnabled(const char *ext_name) { if (NULL == m_device) return false; bool ext_found = false; for (auto ext : m_device_extension_names) { if (!strncmp(ext, ext_name, VK_MAX_EXTENSION_NAME_SIZE)) { ext_found = true; break; } } return ext_found; } // Some tests may need to be skipped if the devsim layer is in use. bool VkRenderFramework::DeviceSimulation() { return m_devsim_layer; } VkInstanceCreateInfo VkRenderFramework::GetInstanceCreateInfo() const { return { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, &debug_reporter_.debug_create_info_, 0, &app_info_, static_cast<uint32_t>(instance_layers_.size()), instance_layers_.data(), static_cast<uint32_t>(instance_extensions_.size()), instance_extensions_.data(), }; } void VkRenderFramework::InitFramework(void * /*unused compatibility parameter*/, void *instance_pnext) { ASSERT_EQ((VkInstance)0, instance_); const auto LayerNotSupportedWithReporting = [](const char *layer) { if (InstanceLayerSupported(layer)) return false; else { ADD_FAILURE() << "InitFramework(): Requested layer \"" << layer << "\" is not supported. It will be disabled."; return true; } }; const auto ExtensionNotSupportedWithReporting = [](const char *extension) { if (InstanceExtensionSupported(extension)) return false; else { ADD_FAILURE() << "InitFramework(): Requested extension \"" << extension << "\" is not supported. It will be disabled."; return true; } }; static bool driver_printed = false; static bool print_driver_info = GetEnvironment("VK_LAYER_TESTS_PRINT_DRIVER") != ""; if (print_driver_info && !driver_printed && InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { instance_extensions_.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } RemoveIf(instance_layers_, LayerNotSupportedWithReporting); RemoveIf(instance_extensions_, ExtensionNotSupportedWithReporting); auto ici = GetInstanceCreateInfo(); // concatenate pNexts void *last_pnext = nullptr; if (instance_pnext) { last_pnext = instance_pnext; while (reinterpret_cast<const VkBaseOutStructure *>(last_pnext)->pNext) last_pnext = reinterpret_cast<VkBaseOutStructure *>(last_pnext)->pNext; void *&link = reinterpret_cast<void *&>(reinterpret_cast<VkBaseOutStructure *>(last_pnext)->pNext); link = const_cast<void *>(ici.pNext); ici.pNext = instance_pnext; } ASSERT_VK_SUCCESS(vk::CreateInstance(&ici, nullptr, &instance_)); if (instance_pnext) reinterpret_cast<VkBaseOutStructure *>(last_pnext)->pNext = nullptr; // reset back borrowed pNext chain // Choose a physical device uint32_t gpu_count = 0; const VkResult err = vk::EnumeratePhysicalDevices(instance_, &gpu_count, nullptr); ASSERT_TRUE(err == VK_SUCCESS || err == VK_INCOMPLETE) << vk_result_string(err); ASSERT_GT(gpu_count, (uint32_t)0) << "No GPU (i.e. 
VkPhysicalDevice) available"; std::vector<VkPhysicalDevice> phys_devices(gpu_count); vk::EnumeratePhysicalDevices(instance_, &gpu_count, phys_devices.data()); const int phys_device_index = VkTestFramework::m_phys_device_index; if ((phys_device_index >= 0) && (phys_device_index < static_cast<int>(gpu_count))) { gpu_ = phys_devices[phys_device_index]; vk::GetPhysicalDeviceProperties(gpu_, &physDevProps_); } else { // Specify a "physical device priority" with larger values meaning higher priority. std::array<int, VK_PHYSICAL_DEVICE_TYPE_CPU + 1> device_type_rank; device_type_rank[VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU] = 4; device_type_rank[VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU] = 3; device_type_rank[VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU] = 2; device_type_rank[VK_PHYSICAL_DEVICE_TYPE_CPU] = 1; device_type_rank[VK_PHYSICAL_DEVICE_TYPE_OTHER] = 0; // Initialize physical device and properties with first device found gpu_ = phys_devices[0]; m_gpu_index = 0; vk::GetPhysicalDeviceProperties(gpu_, &physDevProps_); // See if there are any higher priority devices found for (size_t i = 1; i < phys_devices.size(); ++i) { VkPhysicalDeviceProperties tmp_props; vk::GetPhysicalDeviceProperties(phys_devices[i], &tmp_props); if (device_type_rank[tmp_props.deviceType] > device_type_rank[physDevProps_.deviceType]) { physDevProps_ = tmp_props; gpu_ = phys_devices[i]; m_gpu_index = i; } } } debug_reporter_.Create(instance_); if (print_driver_info && !driver_printed) { auto driver_properties = LvlInitStruct<VkPhysicalDeviceDriverProperties>(); auto physical_device_properties2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&driver_properties); vk::GetPhysicalDeviceProperties2(gpu_, &physical_device_properties2); printf("Driver Name = %s\n", driver_properties.driverName); printf("Driver Info = %s\n", driver_properties.driverInfo); driver_printed = true; } for (const auto &ext : m_requested_extensions) { AddRequiredDeviceExtensions(ext); } } bool VkRenderFramework::AddRequiredExtensions(const char *ext_name) { m_requested_extensions.push_back(ext_name); return AddRequiredInstanceExtensions(ext_name); } bool VkRenderFramework::AreRequestedExtensionsEnabled() const { for (const auto &ext : m_requested_extensions) { // `ext` may refer to an instance or device extension if (!CanEnableDeviceExtension(ext) && !CanEnableInstanceExtension(ext)) { return false; } } return true; } bool VkRenderFramework::AddRequiredInstanceExtensions(const char *ext_name) { if (CanEnableInstanceExtension(ext_name)) { return true; } const auto &instance_exts_map = InstanceExtensions::get_info_map(); bool is_instance_ext = false; if (instance_exts_map.count(ext_name) > 0) { if (!InstanceExtensionSupported(ext_name)) { return false; } else { is_instance_ext = true; } } // Different tables need to be used for extension dependency lookup depending on whether `ext_name` refers to a device or // instance extension if (is_instance_ext) { const auto &info = InstanceExtensions::get_info(ext_name); for (const auto &req : info.requirements) { if (!AddRequiredInstanceExtensions(req.name)) { return false; } } m_instance_extension_names.push_back(ext_name); } else { const auto &info = DeviceExtensions::get_info(ext_name); for (const auto &req : info.requirements) { if (!AddRequiredInstanceExtensions(req.name)) { return false; } } } return true; } bool VkRenderFramework::CanEnableInstanceExtension(const std::string &inst_ext_name) const { return std::any_of(m_instance_extension_names.cbegin(), m_instance_extension_names.cend(), [&inst_ext_name](const char *ext) { 
return inst_ext_name == ext; }); } bool VkRenderFramework::AddRequiredDeviceExtensions(const char *dev_ext_name) { // Check if the extension has already been added if (CanEnableDeviceExtension(dev_ext_name)) { return true; } // If this is an instance extension, just return true under the assumption instance extensions do not depend on any device // extensions. const auto &instance_exts_map = InstanceExtensions::get_info_map(); if (instance_exts_map.count(dev_ext_name) != 0) { return true; } if (!DeviceExtensionSupported(gpu(), nullptr, dev_ext_name)) { return false; } m_device_extension_names.push_back(dev_ext_name); const auto &info = DeviceExtensions::get_info(dev_ext_name); for (const auto &req : info.requirements) { if (!AddRequiredDeviceExtensions(req.name)) { return false; } } return true; } bool VkRenderFramework::CanEnableDeviceExtension(const std::string &dev_ext_name) const { return std::any_of(m_device_extension_names.cbegin(), m_device_extension_names.cend(), [&dev_ext_name](const char *ext) { return dev_ext_name == ext; }); } void VkRenderFramework::ShutdownFramework() { debug_reporter_.error_monitor_.Reset(); // Nothing to shut down without a VkInstance if (!instance_) return; delete m_commandBuffer; m_commandBuffer = nullptr; delete m_commandPool; m_commandPool = nullptr; if (m_framebuffer) vk::DestroyFramebuffer(device(), m_framebuffer, NULL); m_framebuffer = VK_NULL_HANDLE; if (m_renderPass) vk::DestroyRenderPass(device(), m_renderPass, NULL); m_renderPass = VK_NULL_HANDLE; m_renderTargets.clear(); delete m_depthStencil; m_depthStencil = nullptr; // reset the driver delete m_device; m_device = nullptr; debug_reporter_.Destroy(instance_); vk::DestroyInstance(instance_, nullptr); instance_ = NULL; // In case we want to re-initialize } ErrorMonitor &VkRenderFramework::Monitor() { return debug_reporter_.error_monitor_; } void VkRenderFramework::GetPhysicalDeviceFeatures(VkPhysicalDeviceFeatures *features) { if (NULL == m_device) { VkDeviceObj *temp_device = new VkDeviceObj(0, gpu_, m_device_extension_names); *features = temp_device->phy().features(); delete (temp_device); } else { *features = m_device->phy().features(); } } bool VkRenderFramework::IsPlatform(PlatformType platform) { return (!vk_gpu_table.find(platform)->second.compare(physDevProps().deviceName)); } bool VkRenderFramework::IsDriver(VkDriverId driver_id) { // Assumes api version 1.2+ auto driver_properties = LvlInitStruct<VkPhysicalDeviceDriverProperties>(); auto physical_device_properties2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&driver_properties); vk::GetPhysicalDeviceProperties2(gpu_, &physical_device_properties2); return(driver_properties.driverID == driver_id); } void VkRenderFramework::GetPhysicalDeviceProperties(VkPhysicalDeviceProperties *props) { *props = physDevProps_; } void VkRenderFramework::InitState(VkPhysicalDeviceFeatures *features, void *create_device_pnext, const VkCommandPoolCreateFlags flags) { const auto ExtensionNotSupportedWithReporting = [this](const char *extension) { if (DeviceExtensionSupported(extension)) return false; else { ADD_FAILURE() << "InitState(): Requested device extension \"" << extension << "\" is not supported. 
It will be disabled."; return true; } }; RemoveIf(m_device_extension_names, ExtensionNotSupportedWithReporting); m_device = new VkDeviceObj(0, gpu_, m_device_extension_names, features, create_device_pnext); m_device->SetDeviceQueue(); m_depthStencil = new VkDepthStencilObj(m_device); m_render_target_fmt = VkTestFramework::GetFormat(instance_, m_device); m_lineWidth = 1.0f; m_depthBiasConstantFactor = 0.0f; m_depthBiasClamp = 0.0f; m_depthBiasSlopeFactor = 0.0f; m_blendConstants[0] = 1.0f; m_blendConstants[1] = 1.0f; m_blendConstants[2] = 1.0f; m_blendConstants[3] = 1.0f; m_minDepthBounds = 0.f; m_maxDepthBounds = 1.f; m_compareMask = 0xff; m_writeMask = 0xff; m_reference = 0; m_commandPool = new VkCommandPoolObj(m_device, m_device->graphics_queue_node_index_, flags); m_commandBuffer = new VkCommandBufferObj(m_device, m_commandPool); } void VkRenderFramework::InitViewport(float width, float height) { VkViewport viewport; VkRect2D scissor; viewport.x = 0; viewport.y = 0; viewport.width = 1.f * width; viewport.height = 1.f * height; viewport.minDepth = 0.f; viewport.maxDepth = 1.f; m_viewports.push_back(viewport); scissor.extent.width = (int32_t)width; scissor.extent.height = (int32_t)height; scissor.offset.x = 0; scissor.offset.y = 0; m_scissors.push_back(scissor); m_width = width; m_height = height; } void VkRenderFramework::InitViewport() { InitViewport(m_width, m_height); } bool VkRenderFramework::InitSurface() { return InitSurface(m_width, m_height, m_surface); } bool VkRenderFramework::InitSurface(float width, float height) { return InitSurface(width, height, m_surface); } #ifdef VK_USE_PLATFORM_WIN32_KHR LRESULT CALLBACK WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { return DefWindowProc(hwnd, uMsg, wParam, lParam); } #endif // VK_USE_PLATFORM_WIN32_KHR bool VkRenderFramework::InitSurface(float width, float height, VkSurfaceKHR &surface) { #if defined(VK_USE_PLATFORM_WIN32_KHR) HINSTANCE window_instance = GetModuleHandle(nullptr); const char class_name[] = "test"; WNDCLASS wc = {}; wc.lpfnWndProc = WindowProc; wc.hInstance = window_instance; wc.lpszClassName = class_name; RegisterClass(&wc); HWND window = CreateWindowEx(0, class_name, 0, 0, 0, 0, (int)m_width, (int)m_height, NULL, NULL, window_instance, NULL); ShowWindow(window, SW_HIDE); VkWin32SurfaceCreateInfoKHR surface_create_info = {}; surface_create_info.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; surface_create_info.hinstance = window_instance; surface_create_info.hwnd = window; VkResult err = vk::CreateWin32SurfaceKHR(instance(), &surface_create_info, nullptr, &surface); if (err != VK_SUCCESS) return false; #endif #if defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(VALIDATION_APK) VkAndroidSurfaceCreateInfoKHR surface_create_info = {}; surface_create_info.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR; surface_create_info.window = VkTestFramework::window; VkResult err = vk::CreateAndroidSurfaceKHR(instance(), &surface_create_info, nullptr, &m_surface); if (err != VK_SUCCESS) return false; #endif #if defined(VK_USE_PLATFORM_XLIB_KHR) Display *dpy = XOpenDisplay(NULL); if (dpy) { int s = DefaultScreen(dpy); Window window = XCreateSimpleWindow(dpy, RootWindow(dpy, s), 0, 0, (int)m_width, (int)m_height, 1, BlackPixel(dpy, s), WhitePixel(dpy, s)); VkXlibSurfaceCreateInfoKHR surface_create_info = {}; surface_create_info.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR; surface_create_info.dpy = dpy; surface_create_info.window = window; VkResult err = 
vk::CreateXlibSurfaceKHR(instance(), &surface_create_info, nullptr, &m_surface); if (err != VK_SUCCESS) return false; } #endif #if defined(VK_USE_PLATFORM_XCB_KHR) if (m_surface == VK_NULL_HANDLE) { xcb_connection_t *connection = xcb_connect(NULL, NULL); if (connection) { xcb_window_t window = xcb_generate_id(connection); VkXcbSurfaceCreateInfoKHR surface_create_info = {}; surface_create_info.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; surface_create_info.connection = connection; surface_create_info.window = window; VkResult err = vk::CreateXcbSurfaceKHR(instance(), &surface_create_info, nullptr, &m_surface); if (err != VK_SUCCESS) return false; } } #endif return (m_surface == VK_NULL_HANDLE) ? false : true; } // Makes query to get information about swapchain needed to create a valid swapchain object each test creating a swapchain will need void VkRenderFramework::InitSwapchainInfo() { const VkPhysicalDevice physicalDevice = gpu(); vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, m_surface, &m_surface_capabilities); uint32_t format_count; vk::GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, m_surface, &format_count, nullptr); if (format_count != 0) { m_surface_formats.resize(format_count); vk::GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, m_surface, &format_count, m_surface_formats.data()); } uint32_t present_mode_count; vk::GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, m_surface, &present_mode_count, nullptr); if (present_mode_count != 0) { m_surface_present_modes.resize(present_mode_count); vk::GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, m_surface, &present_mode_count, m_surface_present_modes.data()); // Shared Present mode has different requirements most tests won't actually want // Implementation required to support a non-shared present mode for (size_t i = 0; i < m_surface_present_modes.size(); i++) { const VkPresentModeKHR present_mode = m_surface_present_modes[i]; if ((present_mode != VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR) && (present_mode != VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)) { m_surface_non_shared_present_mode = present_mode; break; } } } #ifdef VK_USE_PLATFORM_ANDROID_KHR m_surface_composite_alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR; #else m_surface_composite_alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; #endif } bool VkRenderFramework::InitSwapchain(VkImageUsageFlags imageUsage, VkSurfaceTransformFlagBitsKHR preTransform) { if (InitSurface()) { return InitSwapchain(m_surface, imageUsage, preTransform); } return false; } bool VkRenderFramework::InitSwapchain(VkSurfaceKHR &surface, VkImageUsageFlags imageUsage, VkSurfaceTransformFlagBitsKHR preTransform) { return InitSwapchain(surface, imageUsage, preTransform, m_swapchain); } bool VkRenderFramework::InitSwapchain(VkSurfaceKHR &surface, VkImageUsageFlags imageUsage, VkSurfaceTransformFlagBitsKHR preTransform, VkSwapchainKHR &swapchain, VkSwapchainKHR oldSwapchain) { InitSwapchainInfo(); VkBool32 supported; vk::GetPhysicalDeviceSurfaceSupportKHR(gpu(), m_device->graphics_queue_node_index_, surface, &supported); if (!supported) { // Graphics queue does not support present return false; } VkSwapchainCreateInfoKHR swapchain_create_info = {}; swapchain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swapchain_create_info.pNext = 0; swapchain_create_info.surface = surface; swapchain_create_info.minImageCount = m_surface_capabilities.minImageCount; swapchain_create_info.imageFormat = m_surface_formats[0].format; swapchain_create_info.imageColorSpace = 
m_surface_formats[0].colorSpace; swapchain_create_info.imageExtent = {m_surface_capabilities.minImageExtent.width, m_surface_capabilities.minImageExtent.height}; swapchain_create_info.imageArrayLayers = 1; swapchain_create_info.imageUsage = imageUsage; swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swapchain_create_info.preTransform = preTransform; swapchain_create_info.compositeAlpha = m_surface_composite_alpha; swapchain_create_info.presentMode = m_surface_non_shared_present_mode; swapchain_create_info.clipped = VK_FALSE; swapchain_create_info.oldSwapchain = oldSwapchain; VkResult err = vk::CreateSwapchainKHR(device(), &swapchain_create_info, nullptr, &swapchain); if (err != VK_SUCCESS) { return false; } uint32_t imageCount = 0; vk::GetSwapchainImagesKHR(device(), swapchain, &imageCount, nullptr); vector<VkImage> swapchainImages; swapchainImages.resize(imageCount); vk::GetSwapchainImagesKHR(device(), swapchain, &imageCount, swapchainImages.data()); return true; } void VkRenderFramework::DestroySwapchain() { if (m_swapchain != VK_NULL_HANDLE) { vk::DestroySwapchainKHR(device(), m_swapchain, nullptr); m_swapchain = VK_NULL_HANDLE; } if (m_surface != VK_NULL_HANDLE) { vk::DestroySurfaceKHR(instance(), m_surface, nullptr); m_surface = VK_NULL_HANDLE; } } void VkRenderFramework::InitRenderTarget() { InitRenderTarget(1); } void VkRenderFramework::InitRenderTarget(uint32_t targets) { InitRenderTarget(targets, NULL); } void VkRenderFramework::InitRenderTarget(VkImageView *dsBinding) { InitRenderTarget(1, dsBinding); } void VkRenderFramework::InitRenderTarget(uint32_t targets, VkImageView *dsBinding) { vector<VkAttachmentDescription> &attachments = m_renderPass_attachments; vector<VkAttachmentReference> color_references; vector<VkImageView> &bindings = m_framebuffer_attachments; attachments.reserve(targets + 1); // +1 for dsBinding color_references.reserve(targets); bindings.reserve(targets + 1); // +1 for dsBinding VkAttachmentDescription att = {}; att.format = m_render_target_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = (m_clear_via_load_op) ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = (m_clear_via_load_op) ? 
VK_IMAGE_LAYOUT_UNDEFINED : VK_IMAGE_LAYOUT_GENERAL; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference ref = {}; ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; m_renderPassClearValues.clear(); VkClearValue clear = {}; clear.color = m_clear_color; for (uint32_t i = 0; i < targets; i++) { attachments.push_back(att); ref.attachment = i; color_references.push_back(ref); m_renderPassClearValues.push_back(clear); std::unique_ptr<VkImageObj> img(new VkImageObj(m_device)); VkFormatProperties props; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), m_render_target_fmt, &props); if (props.linearTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) { img->Init((uint32_t)m_width, (uint32_t)m_height, 1, m_render_target_fmt, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_LINEAR); } else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) { img->Init((uint32_t)m_width, (uint32_t)m_height, 1, m_render_target_fmt, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); } else { FAIL() << "Neither Linear nor Optimal allowed for render target"; } bindings.push_back(img->targetView(m_render_target_fmt)); m_renderTargets.push_back(std::move(img)); } m_renderPass_subpasses.clear(); m_renderPass_subpasses.resize(1); VkSubpassDescription &subpass = m_renderPass_subpasses[0]; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = targets; subpass.pColorAttachments = color_references.data(); subpass.pResolveAttachments = NULL; VkAttachmentReference ds_reference; if (dsBinding) { att.format = m_depth_stencil_fmt; att.loadOp = (m_clear_via_load_op) ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD; ; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = (m_clear_via_load_op) ? 
VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments.push_back(att); clear.depthStencil.depth = m_depth_clear_color; clear.depthStencil.stencil = m_stencil_clear_color; m_renderPassClearValues.push_back(clear); bindings.push_back(*dsBinding); ds_reference.attachment = targets; ds_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; subpass.pDepthStencilAttachment = &ds_reference; } else { subpass.pDepthStencilAttachment = NULL; } subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPassCreateInfo &rp_info = m_renderPass_info; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = attachments.size(); rp_info.pAttachments = attachments.data(); rp_info.subpassCount = m_renderPass_subpasses.size(); rp_info.pSubpasses = m_renderPass_subpasses.data(); m_renderPass_dependencies.clear(); if (m_addRenderPassSelfDependency) { m_renderPass_dependencies.resize(1); VkSubpassDependency &subpass_dep = m_renderPass_dependencies[0]; // Add a subpass self-dependency to subpass 0 of default renderPass subpass_dep.srcSubpass = 0; subpass_dep.dstSubpass = 0; // Just using all framebuffer-space pipeline stages in order to get a reasonably large // set of bits that can be used for both src & dst subpass_dep.srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; subpass_dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; // Add all of the gfx mem access bits that correlate to the fb-space pipeline stages subpass_dep.srcAccessMask = VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; subpass_dep.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; // Must include dep_by_region bit when src & dst both include framebuffer-space stages subpass_dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; rp_info.dependencyCount = 1; rp_info.pDependencies = &subpass_dep; } else { rp_info.dependencyCount = 0; rp_info.pDependencies = nullptr; } vk::CreateRenderPass(device(), &rp_info, NULL, &m_renderPass); // Create Framebuffer and RenderPass with color attachments and any // depth/stencil attachment VkFramebufferCreateInfo &fb_info = m_framebuffer_info; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = m_renderPass; fb_info.attachmentCount = bindings.size(); fb_info.pAttachments = bindings.data(); fb_info.width = (uint32_t)m_width; fb_info.height = (uint32_t)m_height; fb_info.layers = 1; vk::CreateFramebuffer(device(), &fb_info, NULL, &m_framebuffer); m_renderPassBeginInfo.renderPass = m_renderPass; m_renderPassBeginInfo.framebuffer = 
m_framebuffer; m_renderPassBeginInfo.renderArea.extent.width = (int32_t)m_width; m_renderPassBeginInfo.renderArea.extent.height = (int32_t)m_height; m_renderPassBeginInfo.clearValueCount = m_renderPassClearValues.size(); m_renderPassBeginInfo.pClearValues = m_renderPassClearValues.data(); } void VkRenderFramework::DestroyRenderTarget() { vk::DestroyRenderPass(device(), m_renderPass, nullptr); m_renderPass = VK_NULL_HANDLE; vk::DestroyFramebuffer(device(), m_framebuffer, nullptr); m_framebuffer = VK_NULL_HANDLE; } bool VkRenderFramework::InitFrameworkAndRetrieveFeatures(VkPhysicalDeviceFeatures2KHR &features2) { if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("Instance extension %s not supported, skipping test\n", VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return false; } InitFramework(); // Cycle through device extensions and check for support for (auto extension : m_device_extension_names) { if (!DeviceExtensionSupported(extension)) { printf("Device extension %s is not supported\n", extension); return false; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); if (vkGetPhysicalDeviceFeatures2KHR) { vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); return true; } else { printf("Cannot use vkGetPhysicalDeviceFeatures to determine available features\n"); return false; } } VkDeviceObj::VkDeviceObj(uint32_t id, VkPhysicalDevice obj) : vk_testing::Device(obj), id(id) { init(); props = phy().properties(); queue_props = phy().queue_properties(); } VkDeviceObj::VkDeviceObj(uint32_t id, VkPhysicalDevice obj, vector<const char *> &extension_names, VkPhysicalDeviceFeatures *features, void *create_device_pnext) : vk_testing::Device(obj), id(id) { init(extension_names, features, create_device_pnext); props = phy().properties(); queue_props = phy().queue_properties(); } uint32_t VkDeviceObj::QueueFamilyMatching(VkQueueFlags with, VkQueueFlags without, bool all_bits) { // Find a queue family with and without desired capabilities for (uint32_t i = 0; i < queue_props.size(); i++) { auto flags = queue_props[i].queueFlags; bool matches = all_bits ? 
(flags & with) == with : (flags & with) != 0; if (matches && ((flags & without) == 0) && (queue_props[i].queueCount > 0)) { return i; } } return UINT32_MAX; } void VkDeviceObj::SetDeviceQueue() { ASSERT_NE(true, graphics_queues().empty()); m_queue = graphics_queues()[0]->handle(); } VkQueueObj *VkDeviceObj::GetDefaultQueue() { if (graphics_queues().empty()) return nullptr; return graphics_queues()[0]; } VkQueueObj *VkDeviceObj::GetDefaultComputeQueue() { if (compute_queues().empty()) return nullptr; return compute_queues()[0]; } VkDescriptorSetLayoutObj::VkDescriptorSetLayoutObj(const VkDeviceObj *device, const vector<VkDescriptorSetLayoutBinding> &descriptor_set_bindings, VkDescriptorSetLayoutCreateFlags flags, void *pNext) { VkDescriptorSetLayoutCreateInfo dsl_ci = {}; dsl_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; dsl_ci.pNext = pNext; dsl_ci.flags = flags; dsl_ci.bindingCount = static_cast<uint32_t>(descriptor_set_bindings.size()); dsl_ci.pBindings = descriptor_set_bindings.data(); init(*device, dsl_ci); } VkDescriptorSetObj::VkDescriptorSetObj(VkDeviceObj *device) : m_device(device), m_nextSlot(0) {} VkDescriptorSetObj::~VkDescriptorSetObj() NOEXCEPT { if (m_set) { delete m_set; } } int VkDescriptorSetObj::AppendDummy() { /* request a descriptor but do not update it */ VkDescriptorSetLayoutBinding binding = {}; binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; binding.descriptorCount = 1; binding.binding = m_layout_bindings.size(); binding.stageFlags = VK_SHADER_STAGE_ALL; binding.pImmutableSamplers = NULL; m_layout_bindings.push_back(binding); m_type_counts[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] += binding.descriptorCount; return m_nextSlot++; } int VkDescriptorSetObj::AppendBuffer(VkDescriptorType type, VkConstantBufferObj &constantBuffer) { assert(type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC); VkDescriptorSetLayoutBinding binding = {}; binding.descriptorType = type; binding.descriptorCount = 1; binding.binding = m_layout_bindings.size(); binding.stageFlags = VK_SHADER_STAGE_ALL; binding.pImmutableSamplers = NULL; m_layout_bindings.push_back(binding); m_type_counts[type] += binding.descriptorCount; m_writes.push_back(vk_testing::Device::write_descriptor_set(vk_testing::DescriptorSet(), m_nextSlot, 0, type, 1, &constantBuffer.m_descriptorBufferInfo)); return m_nextSlot++; } int VkDescriptorSetObj::AppendSamplerTexture(VkSamplerObj *sampler, VkTextureObj *texture) { VkDescriptorSetLayoutBinding binding = {}; binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; binding.descriptorCount = 1; binding.binding = m_layout_bindings.size(); binding.stageFlags = VK_SHADER_STAGE_ALL; binding.pImmutableSamplers = NULL; m_layout_bindings.push_back(binding); m_type_counts[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] += binding.descriptorCount; VkDescriptorImageInfo tmp = texture->DescriptorImageInfo(); tmp.sampler = sampler->handle(); m_imageSamplerDescriptors.push_back(tmp); m_writes.push_back(vk_testing::Device::write_descriptor_set(vk_testing::DescriptorSet(), m_nextSlot, 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, &tmp)); return m_nextSlot++; } VkPipelineLayout VkDescriptorSetObj::GetPipelineLayout() const { return m_pipeline_layout.handle(); } VkDescriptorSetLayout VkDescriptorSetObj::GetDescriptorSetLayout() const { return m_layout.handle(); } VkDescriptorSet VkDescriptorSetObj::GetDescriptorSetHandle() 
const { if (m_set) return m_set->handle(); else return VK_NULL_HANDLE; } void VkDescriptorSetObj::CreateVKDescriptorSet(VkCommandBufferObj *commandBuffer) { if (m_type_counts.size()) { // create VkDescriptorPool VkDescriptorPoolSize poolSize; vector<VkDescriptorPoolSize> sizes; for (auto it = m_type_counts.begin(); it != m_type_counts.end(); ++it) { poolSize.descriptorCount = it->second; poolSize.type = it->first; sizes.push_back(poolSize); } VkDescriptorPoolCreateInfo pool = {}; pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool.poolSizeCount = sizes.size(); pool.maxSets = 1; pool.pPoolSizes = sizes.data(); init(*m_device, pool); } // create VkDescriptorSetLayout VkDescriptorSetLayoutCreateInfo layout = {}; layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; layout.bindingCount = m_layout_bindings.size(); layout.pBindings = m_layout_bindings.data(); m_layout.init(*m_device, layout); vector<const vk_testing::DescriptorSetLayout *> layouts; layouts.push_back(&m_layout); // create VkPipelineLayout VkPipelineLayoutCreateInfo pipeline_layout = {}; pipeline_layout.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout.setLayoutCount = layouts.size(); pipeline_layout.pSetLayouts = NULL; m_pipeline_layout.init(*m_device, pipeline_layout, layouts); if (m_type_counts.size()) { // create VkDescriptorSet m_set = alloc_sets(*m_device, m_layout); // build the update array size_t imageSamplerCount = 0; for (vector<VkWriteDescriptorSet>::iterator it = m_writes.begin(); it != m_writes.end(); it++) { it->dstSet = m_set->handle(); if (it->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) it->pImageInfo = &m_imageSamplerDescriptors[imageSamplerCount++]; } // do the updates m_device->update_descriptor_sets(m_writes); } } VkRenderpassObj::VkRenderpassObj(VkDeviceObj *dev, const VkFormat format) { // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; subpass.colorAttachmentCount = 1; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; init(*dev, rpci); } VkRenderpassObj::VkRenderpassObj(VkDeviceObj *dev, VkFormat format, bool depthStencil) { if (!depthStencil) { VkRenderpassObj(dev, format); } else { // Create a renderPass with a depth/stencil attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pDepthStencilAttachment = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; init(*dev, rpci); } } VkImageObj::VkImageObj(VkDeviceObj *dev) { m_device = dev; m_descriptorImageInfo.imageView = VK_NULL_HANDLE; m_descriptorImageInfo.imageLayout = 
VK_IMAGE_LAYOUT_GENERAL; m_arrayLayers = 0; m_mipLevels = 0; } // clang-format off void VkImageObj::ImageMemoryBarrier(VkCommandBufferObj *cmd_buf, VkImageAspectFlags aspect, VkFlags output_mask /*= VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_MEMORY_OUTPUT_COPY_BIT*/, VkFlags input_mask /*= VK_ACCESS_HOST_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_MEMORY_INPUT_COPY_BIT*/, VkImageLayout image_layout, VkPipelineStageFlags src_stages, VkPipelineStageFlags dest_stages, uint32_t srcQueueFamilyIndex, uint32_t dstQueueFamilyIndex) { // clang-format on const VkImageSubresourceRange subresourceRange = subresource_range(aspect, 0, m_mipLevels, 0, m_arrayLayers); VkImageMemoryBarrier barrier; barrier = image_memory_barrier(output_mask, input_mask, Layout(), image_layout, subresourceRange, srcQueueFamilyIndex, dstQueueFamilyIndex); VkImageMemoryBarrier *pmemory_barrier = &barrier; // write barrier to the command buffer vk::CmdPipelineBarrier(cmd_buf->handle(), src_stages, dest_stages, VK_DEPENDENCY_BY_REGION_BIT, 0, NULL, 0, NULL, 1, pmemory_barrier); } void VkImageObj::SetLayout(VkCommandBufferObj *cmd_buf, VkImageAspectFlags aspect, VkImageLayout image_layout) { VkFlags src_mask, dst_mask; const VkFlags all_cache_outputs = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; const VkFlags all_cache_inputs = VK_ACCESS_HOST_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_MEMORY_READ_BIT; const VkFlags shader_read_inputs = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_MEMORY_READ_BIT; if (image_layout == m_descriptorImageInfo.imageLayout) { return; } // Attempt to narrow the src_mask by what the image could have validly been used for in its current layout switch (m_descriptorImageInfo.imageLayout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: src_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break; case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: src_mask = shader_read_inputs; break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: src_mask = VK_ACCESS_TRANSFER_WRITE_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: src_mask = VK_ACCESS_TRANSFER_READ_BIT; break; case VK_IMAGE_LAYOUT_UNDEFINED: src_mask = 0; break; default: src_mask = all_cache_outputs; // Only need to worry about writes, as the stage mask will protect reads } // Narrow the dst mask by the valid accesses for the new layout switch (image_layout) { case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: // NOTE: not sure why shader read is here...
dst_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT; break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: dst_mask = VK_ACCESS_TRANSFER_WRITE_BIT; break; case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: dst_mask = shader_read_inputs; break; case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: dst_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: dst_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; break; default: // Must wait for all read and write operations for the completion of the layout transition dst_mask = all_cache_inputs | all_cache_outputs; break; } ImageMemoryBarrier(cmd_buf, aspect, src_mask, dst_mask, image_layout); m_descriptorImageInfo.imageLayout = image_layout; } void VkImageObj::SetLayout(VkImageAspectFlags aspect, VkImageLayout image_layout) { if (image_layout == m_descriptorImageInfo.imageLayout) { return; } VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_); VkCommandBufferObj cmd_buf(m_device, &pool); /* Build command buffer to set image layout in the driver */ cmd_buf.begin(); SetLayout(&cmd_buf, aspect, image_layout); cmd_buf.end(); cmd_buf.QueueCommandBuffer(); } bool VkImageObj::IsCompatible(const VkImageUsageFlags usages, const VkFormatFeatureFlags features) { VkFormatFeatureFlags all_feature_flags = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT | VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT | VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT; if (m_device->IsEnabledExtension(VK_IMG_FILTER_CUBIC_EXTENSION_NAME)) { all_feature_flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG; } if (m_device->IsEnabledExtension(VK_KHR_MAINTENANCE_1_EXTENSION_NAME)) { all_feature_flags |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR; } if (m_device->IsEnabledExtension(VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME)) { all_feature_flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT; } if (m_device->IsEnabledExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) { all_feature_flags |= VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR | VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR | VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR | VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR | VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR | VK_FORMAT_FEATURE_DISJOINT_BIT_KHR | VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR; } if ((features & all_feature_flags) == 0) return false; // whole format unsupported if ((usages & VK_IMAGE_USAGE_SAMPLED_BIT) && !(features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) return false; if ((usages & VK_IMAGE_USAGE_STORAGE_BIT) && !(features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) return false; if ((usages & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) return false; if ((usages & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) return false; if
(m_device->IsEnabledExtension(VK_KHR_MAINTENANCE_1_EXTENSION_NAME)) { // WORKAROUND: for DevSim not reporting extended enums, and possibly some drivers too const auto all_nontransfer_feature_flags = all_feature_flags ^ (VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR); const bool transfer_probably_supported_anyway = (features & all_nontransfer_feature_flags) > 0; if (!transfer_probably_supported_anyway) { if ((usages & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR)) return false; if ((usages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) return false; } } return true; } VkImageCreateInfo VkImageObj::ImageCreateInfo2D(uint32_t const width, uint32_t const height, uint32_t const mipLevels, uint32_t const layers, VkFormat const format, VkFlags const usage, VkImageTiling const requested_tiling, const std::vector<uint32_t> *queue_families) { VkImageCreateInfo imageCreateInfo = vk_testing::Image::create_info(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.extent.width = width; imageCreateInfo.extent.height = height; imageCreateInfo.mipLevels = mipLevels; imageCreateInfo.arrayLayers = layers; imageCreateInfo.tiling = requested_tiling; // This will be touched up below... imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Automatically set sharing mode etc. based on queue family information if (queue_families && (queue_families->size() > 1)) { imageCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT; imageCreateInfo.queueFamilyIndexCount = static_cast<uint32_t>(queue_families->size()); imageCreateInfo.pQueueFamilyIndices = queue_families->data(); } imageCreateInfo.usage = usage; return imageCreateInfo; } void VkImageObj::InitNoLayout(uint32_t const width, uint32_t const height, uint32_t const mipLevels, VkFormat const format, VkFlags const usage, VkImageTiling const requested_tiling, VkMemoryPropertyFlags const reqs, const vector<uint32_t> *queue_families, bool memory) { InitNoLayout(ImageCreateInfo2D(width, height, mipLevels, 1, format, usage, requested_tiling, queue_families), reqs, memory); } void VkImageObj::InitNoLayout(const VkImageCreateInfo &create_info, VkMemoryPropertyFlags const reqs, bool memory) { VkFormatProperties image_fmt; // Touch up create info for tiling compatibility... auto usage = create_info.usage; VkImageTiling requested_tiling = create_info.tiling; VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), create_info.format, &image_fmt); if (requested_tiling == VK_IMAGE_TILING_LINEAR) { if (IsCompatible(usage, image_fmt.linearTilingFeatures)) { tiling = VK_IMAGE_TILING_LINEAR; } else if (IsCompatible(usage, image_fmt.optimalTilingFeatures)) { tiling = VK_IMAGE_TILING_OPTIMAL; } else { FAIL() << "VkImageObj::init() error: unsupported tiling configuration. Usage: " << std::hex << std::showbase << usage << ", supported linear features: " << image_fmt.linearTilingFeatures; } } else if (IsCompatible(usage, image_fmt.optimalTilingFeatures)) { tiling = VK_IMAGE_TILING_OPTIMAL; } else if (IsCompatible(usage, image_fmt.linearTilingFeatures)) { tiling = VK_IMAGE_TILING_LINEAR; } else { FAIL() << "VkImageObj::init() error: unsupported tiling configuration. 
Usage: " << std::hex << std::showbase << usage << ", supported optimal features: " << image_fmt.optimalTilingFeatures; } VkImageCreateInfo imageCreateInfo = create_info; imageCreateInfo.tiling = tiling; m_mipLevels = imageCreateInfo.mipLevels; m_arrayLayers = imageCreateInfo.arrayLayers; Layout(imageCreateInfo.initialLayout); if (memory) vk_testing::Image::init(*m_device, imageCreateInfo, reqs); else vk_testing::Image::init_no_mem(*m_device, imageCreateInfo); } void VkImageObj::Init(uint32_t const width, uint32_t const height, uint32_t const mipLevels, VkFormat const format, VkFlags const usage, VkImageTiling const requested_tiling, VkMemoryPropertyFlags const reqs, const vector<uint32_t> *queue_families, bool memory) { Init(ImageCreateInfo2D(width, height, mipLevels, 1, format, usage, requested_tiling, queue_families), reqs, memory); } void VkImageObj::Init(const VkImageCreateInfo &create_info, VkMemoryPropertyFlags const reqs, bool memory) { InitNoLayout(create_info, reqs, memory); if (!initialized() || !memory) return; // We don't have a valid handle from early stage init, and thus SetLayout will fail VkImageLayout newLayout; const auto usage = create_info.usage; if (usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; else if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; else newLayout = m_descriptorImageInfo.imageLayout; VkImageAspectFlags image_aspect = 0; const auto format = create_info.format; if (FormatIsDepthAndStencil(format)) { image_aspect = VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; } else if (FormatIsDepthOnly(format)) { image_aspect = VK_IMAGE_ASPECT_DEPTH_BIT; } else if (FormatIsStencilOnly(format)) { image_aspect = VK_IMAGE_ASPECT_STENCIL_BIT; } else { // color image_aspect = VK_IMAGE_ASPECT_COLOR_BIT; } SetLayout(image_aspect, newLayout); } void VkImageObj::init(const VkImageCreateInfo *create_info) { VkFormatProperties image_fmt; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), create_info->format, &image_fmt); switch (create_info->tiling) { case VK_IMAGE_TILING_OPTIMAL: if (!IsCompatible(create_info->usage, image_fmt.optimalTilingFeatures)) { FAIL() << "VkImageObj::init() error: unsupported tiling configuration. Usage: " << std::hex << std::showbase << create_info->usage << ", supported optimal features: " << image_fmt.optimalTilingFeatures; } break; case VK_IMAGE_TILING_LINEAR: if (!IsCompatible(create_info->usage, image_fmt.linearTilingFeatures)) { FAIL() << "VkImageObj::init() error: unsupported tiling configuration. 
Usage: " << std::hex << std::showbase << create_info->usage << ", supported linear features: " << image_fmt.linearTilingFeatures; } break; default: break; } Layout(create_info->initialLayout); vk_testing::Image::init(*m_device, *create_info, 0); m_mipLevels = create_info->mipLevels; m_arrayLayers = create_info->arrayLayers; VkImageAspectFlags image_aspect = 0; if (FormatIsDepthAndStencil(create_info->format)) { image_aspect = VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; } else if (FormatIsDepthOnly(create_info->format)) { image_aspect = VK_IMAGE_ASPECT_DEPTH_BIT; } else if (FormatIsStencilOnly(create_info->format)) { image_aspect = VK_IMAGE_ASPECT_STENCIL_BIT; } else { // color image_aspect = VK_IMAGE_ASPECT_COLOR_BIT; } SetLayout(image_aspect, VK_IMAGE_LAYOUT_GENERAL); } bool VkImageObj::IsCompatibleCheck(const VkImageCreateInfo &create_info) { VkFormatProperties image_fmt; vk::GetPhysicalDeviceFormatProperties(m_device->phy().handle(), create_info.format, &image_fmt); switch (create_info.tiling) { case VK_IMAGE_TILING_OPTIMAL: return IsCompatible(create_info.usage, image_fmt.optimalTilingFeatures); case VK_IMAGE_TILING_LINEAR: return IsCompatible(create_info.usage, image_fmt.linearTilingFeatures); default: return true; } } VkResult VkImageObj::CopyImage(VkImageObj &src_image) { VkImageLayout src_image_layout, dest_image_layout; VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_); VkCommandBufferObj cmd_buf(m_device, &pool); /* Build command buffer to copy staging texture to usable texture */ cmd_buf.begin(); /* TODO: Can we determine image aspect from image object? */ src_image_layout = src_image.Layout(); src_image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dest_image_layout = (this->Layout() == VK_IMAGE_LAYOUT_UNDEFINED) ? VK_IMAGE_LAYOUT_GENERAL : this->Layout(); this->SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageCopy copy_region = {}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent = src_image.extent(); vk::CmdCopyImage(cmd_buf.handle(), src_image.handle(), src_image.Layout(), handle(), Layout(), 1, &copy_region); src_image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, src_image_layout); this->SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, dest_image_layout); cmd_buf.end(); cmd_buf.QueueCommandBuffer(); return VK_SUCCESS; } // Same as CopyImage, but in the opposite direction VkResult VkImageObj::CopyImageOut(VkImageObj &dst_image) { VkImageLayout src_image_layout, dest_image_layout; VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_); VkCommandBufferObj cmd_buf(m_device, &pool); cmd_buf.begin(); src_image_layout = this->Layout(); this->SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dest_image_layout = (dst_image.Layout() == VK_IMAGE_LAYOUT_UNDEFINED) ? 
VK_IMAGE_LAYOUT_GENERAL : dst_image.Layout(); dst_image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageCopy copy_region = {}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent = dst_image.extent(); vk::CmdCopyImage(cmd_buf.handle(), handle(), Layout(), dst_image.handle(), dst_image.Layout(), 1, &copy_region); this->SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, src_image_layout); dst_image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, dest_image_layout); cmd_buf.end(); cmd_buf.QueueCommandBuffer(); return VK_SUCCESS; } // Return 16x16 pixel block std::array<std::array<uint32_t, 16>, 16> VkImageObj::Read() { VkImageObj stagingImage(m_device); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; stagingImage.Init(16, 16, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_LINEAR, reqs); stagingImage.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkSubresourceLayout layout = stagingImage.subresource_layout(subresource(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0)); CopyImageOut(stagingImage); void *data = stagingImage.MapMemory(); std::array<std::array<uint32_t, 16>, 16> m = {}; if (data) { for (uint32_t y = 0; y < stagingImage.extent().height; y++) { uint32_t *row = (uint32_t *)((char *)data + layout.rowPitch * y); for (uint32_t x = 0; x < stagingImage.extent().width; x++) m[y][x] = row[x]; } } stagingImage.UnmapMemory(); return m; } VkTextureObj::VkTextureObj(VkDeviceObj *device, uint32_t *colors) : VkImageObj(device) { m_device = device; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; uint32_t tex_colors[2] = {0xffff0000, 0xff00ff00}; void *data; uint32_t x, y; VkImageObj stagingImage(device); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; stagingImage.Init(16, 16, 1, tex_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_LINEAR, reqs); VkSubresourceLayout layout = stagingImage.subresource_layout(subresource(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0)); if (colors == NULL) colors = tex_colors; VkImageViewCreateInfo view = {}; view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view.pNext = NULL; view.image = VK_NULL_HANDLE; view.viewType = VK_IMAGE_VIEW_TYPE_2D; view.format = tex_format; view.components.r = VK_COMPONENT_SWIZZLE_R; view.components.g = VK_COMPONENT_SWIZZLE_G; view.components.b = VK_COMPONENT_SWIZZLE_B; view.components.a = VK_COMPONENT_SWIZZLE_A; view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view.subresourceRange.baseMipLevel = 0; view.subresourceRange.levelCount = 1; view.subresourceRange.baseArrayLayer = 0; view.subresourceRange.layerCount = 1; /* create image */ Init(16, 16, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); stagingImage.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); /* create image view */ 
view.image = handle(); m_textureView.init(*m_device, view); m_descriptorImageInfo.imageView = m_textureView.handle(); data = stagingImage.MapMemory(); for (y = 0; y < extent().height; y++) { uint32_t *row = (uint32_t *)((char *)data + layout.rowPitch * y); for (x = 0; x < extent().width; x++) row[x] = colors[(x & 1) ^ (y & 1)]; } stagingImage.UnmapMemory(); stagingImage.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); VkImageObj::CopyImage(stagingImage); } VkSamplerObj::VkSamplerObj(VkDeviceObj *device) { m_device = device; VkSamplerCreateInfo samplerCreateInfo; memset(&samplerCreateInfo, 0, sizeof(samplerCreateInfo)); samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.magFilter = VK_FILTER_NEAREST; samplerCreateInfo.minFilter = VK_FILTER_NEAREST; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.mipLodBias = 0.0; samplerCreateInfo.anisotropyEnable = VK_FALSE; samplerCreateInfo.maxAnisotropy = 1; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0; samplerCreateInfo.maxLod = 0.0; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; samplerCreateInfo.unnormalizedCoordinates = VK_FALSE; init(*m_device, samplerCreateInfo); } /* * Basic ConstantBuffer constructor. Then use create methods to fill in the * details. */ VkConstantBufferObj::VkConstantBufferObj(VkDeviceObj *device, VkBufferUsageFlags usage) { m_device = device; memset(&m_descriptorBufferInfo, 0, sizeof(m_descriptorBufferInfo)); // Special case for usages outside of original limits of framework if ((VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT) != usage) { init_no_mem(*m_device, create_info(0, usage)); } } VkConstantBufferObj::VkConstantBufferObj(VkDeviceObj *device, VkDeviceSize allocationSize, const void *data, VkBufferUsageFlags usage) { m_device = device; memset(&m_descriptorBufferInfo, 0, sizeof(m_descriptorBufferInfo)); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; if ((VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT) == usage) { init_as_src_and_dst(*m_device, allocationSize, reqs); } else { init(*m_device, create_info(allocationSize, usage), reqs); } void *pData = memory().map(); memcpy(pData, data, static_cast<size_t>(allocationSize)); memory().unmap(); /* * Constant buffers are going to be used as vertex input buffers * or as shader uniform buffers. So, we'll create the shaderbuffer * descriptor here so it's ready if needed. 
*/ this->m_descriptorBufferInfo.buffer = handle(); this->m_descriptorBufferInfo.offset = 0; this->m_descriptorBufferInfo.range = allocationSize; } VkPipelineShaderStageCreateInfo const &VkShaderObj::GetStageCreateInfo() const { return m_stage_info; } VkShaderObj::VkShaderObj(VkDeviceObj &device, VkShaderStageFlagBits stage, char const *name, const VkSpecializationInfo *specInfo) : m_device(device) { m_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; m_stage_info.pNext = nullptr; m_stage_info.flags = 0; m_stage_info.stage = stage; m_stage_info.module = VK_NULL_HANDLE; m_stage_info.pName = name; m_stage_info.pSpecializationInfo = specInfo; } VkShaderObj::VkShaderObj(VkDeviceObj *device, const char *shader_code, VkShaderStageFlagBits stage, VkRenderFramework *framework, char const *name, bool debug, const VkSpecializationInfo *specInfo, const spv_target_env env) : VkShaderObj(*device, stage, name, specInfo) { InitFromGLSL(*framework, shader_code, debug, env); } bool VkShaderObj::InitFromGLSL(VkRenderFramework &framework, const char *shader_code, bool debug, const spv_target_env env) { std::vector<uint32_t> spv; framework.GLSLtoSPV(&m_device.props.limits, m_stage_info.stage, shader_code, spv, debug, env); VkShaderModuleCreateInfo moduleCreateInfo = {}; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.codeSize = spv.size() * sizeof(uint32_t); moduleCreateInfo.pCode = spv.data(); init(m_device, moduleCreateInfo); m_stage_info.module = handle(); return VK_NULL_HANDLE != handle(); } // Because shaders are currently validated at pipeline creation time, there are test cases that might fail shader module creation // due to supplying an invalid/unknown SPIR-V capability/operation. This is called after VkShaderObj creation when tests are found // to crash on a CI device VkResult VkShaderObj::InitFromGLSLTry(VkRenderFramework &framework, const char *shader_code, bool debug, const spv_target_env env) { std::vector<uint32_t> spv; framework.GLSLtoSPV(&m_device.props.limits, m_stage_info.stage, shader_code, spv, debug, env); VkShaderModuleCreateInfo moduleCreateInfo = {}; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.codeSize = spv.size() * sizeof(uint32_t); moduleCreateInfo.pCode = spv.data(); const auto result = init_try(m_device, moduleCreateInfo); m_stage_info.module = handle(); return result; } VkShaderObj::VkShaderObj(VkDeviceObj *device, const string spv_source, VkShaderStageFlagBits stage, VkRenderFramework *framework, char const *name, const VkSpecializationInfo *specInfo, const spv_target_env env) : VkShaderObj(*device, stage, name, specInfo) { InitFromASM(*framework, spv_source, env); } bool VkShaderObj::InitFromASM(VkRenderFramework &framework, const std::string &spv_source, const spv_target_env env) { vector<uint32_t> spv; framework.ASMtoSPV(env, 0, spv_source.data(), spv); VkShaderModuleCreateInfo moduleCreateInfo = {}; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.codeSize = spv.size() * sizeof(uint32_t); moduleCreateInfo.pCode = spv.data(); init(m_device, moduleCreateInfo); m_stage_info.module = handle(); return VK_NULL_HANDLE != handle(); } VkResult VkShaderObj::InitFromASMTry(VkRenderFramework &framework, const std::string &spv_source, const spv_target_env spv_env) { vector<uint32_t> spv; framework.ASMtoSPV(spv_env, 0, spv_source.data(), spv); VkShaderModuleCreateInfo moduleCreateInfo = {}; moduleCreateInfo.sType = 
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.codeSize = spv.size() * sizeof(uint32_t); moduleCreateInfo.pCode = spv.data(); const auto result = init_try(m_device, moduleCreateInfo); m_stage_info.module = handle(); return result; } // static std::unique_ptr<VkShaderObj> VkShaderObj::CreateFromGLSL(VkDeviceObj &dev, VkRenderFramework &framework, VkShaderStageFlagBits stage, const std::string &code, const char *entry_point, const VkSpecializationInfo *spec_info, const spv_target_env spv_env, bool debug) { auto shader = layer_data::make_unique<VkShaderObj>(dev, stage, entry_point, spec_info); if (VK_SUCCESS == shader->InitFromGLSLTry(framework, code.c_str(), debug, spv_env)) { return shader; } return {}; } // static std::unique_ptr<VkShaderObj> VkShaderObj::CreateFromASM(VkDeviceObj &dev, VkRenderFramework &framework, VkShaderStageFlagBits stage, const std::string &code, const char *entry_point, const VkSpecializationInfo *spec_info, const spv_target_env spv_env) { auto shader = layer_data::make_unique<VkShaderObj>(dev, stage, entry_point, spec_info); if (VK_SUCCESS == shader->InitFromASMTry(framework, code.c_str(), spv_env)) { return shader; } return {}; } VkPipelineLayoutObj::VkPipelineLayoutObj(VkDeviceObj *device, const vector<const VkDescriptorSetLayoutObj *> &descriptor_layouts, const vector<VkPushConstantRange> &push_constant_ranges) { VkPipelineLayoutCreateInfo pl_ci = {}; pl_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pl_ci.pushConstantRangeCount = static_cast<uint32_t>(push_constant_ranges.size()); pl_ci.pPushConstantRanges = push_constant_ranges.data(); auto descriptor_layouts_unwrapped = MakeTestbindingHandles<const vk_testing::DescriptorSetLayout>(descriptor_layouts); init(*device, pl_ci, descriptor_layouts_unwrapped); } void VkPipelineLayoutObj::Reset() { *this = VkPipelineLayoutObj(); } VkPipelineObj::VkPipelineObj(VkDeviceObj *device) { m_device = device; m_vi_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; m_vi_state.pNext = nullptr; m_vi_state.flags = 0; m_vi_state.vertexBindingDescriptionCount = 0; m_vi_state.pVertexBindingDescriptions = nullptr; m_vi_state.vertexAttributeDescriptionCount = 0; m_vi_state.pVertexAttributeDescriptions = nullptr; m_ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; m_ia_state.pNext = nullptr; m_ia_state.flags = 0; m_ia_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; m_ia_state.primitiveRestartEnable = VK_FALSE; m_te_state = nullptr; m_vp_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; m_vp_state.pNext = VK_NULL_HANDLE; m_vp_state.flags = 0; m_vp_state.viewportCount = 1; m_vp_state.scissorCount = 1; m_vp_state.pViewports = nullptr; m_vp_state.pScissors = nullptr; m_rs_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; m_rs_state.pNext = &m_line_state; m_rs_state.flags = 0; m_rs_state.depthClampEnable = VK_FALSE; m_rs_state.rasterizerDiscardEnable = VK_FALSE; m_rs_state.polygonMode = VK_POLYGON_MODE_FILL; m_rs_state.cullMode = VK_CULL_MODE_BACK_BIT; m_rs_state.frontFace = VK_FRONT_FACE_CLOCKWISE; m_rs_state.depthBiasEnable = VK_FALSE; m_rs_state.depthBiasConstantFactor = 0.0f; m_rs_state.depthBiasClamp = 0.0f; m_rs_state.depthBiasSlopeFactor = 0.0f; m_rs_state.lineWidth = 1.0f; m_line_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT; m_line_state.pNext = nullptr; m_line_state.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT; m_line_state.stippledLineEnable = VK_FALSE; 
m_line_state.lineStippleFactor = 0; m_line_state.lineStipplePattern = 0; m_ms_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; m_ms_state.pNext = nullptr; m_ms_state.flags = 0; m_ms_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; m_ms_state.sampleShadingEnable = VK_FALSE; m_ms_state.minSampleShading = 0.0f; m_ms_state.pSampleMask = nullptr; m_ms_state.alphaToCoverageEnable = VK_FALSE; m_ms_state.alphaToOneEnable = VK_FALSE; m_ds_state = nullptr; memset(&m_cb_state, 0, sizeof(m_cb_state)); m_cb_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; m_cb_state.blendConstants[0] = 1.0f; m_cb_state.blendConstants[1] = 1.0f; m_cb_state.blendConstants[2] = 1.0f; m_cb_state.blendConstants[3] = 1.0f; memset(&m_pd_state, 0, sizeof(m_pd_state)); } void VkPipelineObj::AddShader(VkShaderObj *shader) { m_shaderStages.push_back(shader->GetStageCreateInfo()); } void VkPipelineObj::AddShader(VkPipelineShaderStageCreateInfo const &createInfo) { m_shaderStages.push_back(createInfo); } void VkPipelineObj::AddVertexInputAttribs(VkVertexInputAttributeDescription *vi_attrib, uint32_t count) { m_vi_state.pVertexAttributeDescriptions = vi_attrib; m_vi_state.vertexAttributeDescriptionCount = count; } void VkPipelineObj::AddVertexInputBindings(VkVertexInputBindingDescription *vi_binding, uint32_t count) { m_vi_state.pVertexBindingDescriptions = vi_binding; m_vi_state.vertexBindingDescriptionCount = count; } void VkPipelineObj::AddColorAttachment(uint32_t binding, const VkPipelineColorBlendAttachmentState &att) { if (binding + 1 > m_colorAttachments.size()) { m_colorAttachments.resize(binding + 1); } m_colorAttachments[binding] = att; } void VkPipelineObj::SetDepthStencil(const VkPipelineDepthStencilStateCreateInfo *ds_state) { m_ds_state = ds_state; } void VkPipelineObj::SetViewport(const vector<VkViewport> viewports) { m_viewports = viewports; // If we explicitly set a null viewport, pass it through to create info // but preserve viewportCount because it mustn't change if (m_viewports.size() == 0) { m_vp_state.pViewports = nullptr; } } void VkPipelineObj::SetScissor(const vector<VkRect2D> scissors) { m_scissors = scissors; // If we explicitly set a null scissor, pass it through to create info // but preserve scissorCount because it mustn't change if (m_scissors.size() == 0) { m_vp_state.pScissors = nullptr; } } void VkPipelineObj::MakeDynamic(VkDynamicState state) { /* Only add a state once */ for (auto it = m_dynamic_state_enables.begin(); it != m_dynamic_state_enables.end(); it++) { if ((*it) == state) return; } m_dynamic_state_enables.push_back(state); } void VkPipelineObj::SetMSAA(const VkPipelineMultisampleStateCreateInfo *ms_state) { m_ms_state = *ms_state; } void VkPipelineObj::SetInputAssembly(const VkPipelineInputAssemblyStateCreateInfo *ia_state) { m_ia_state = *ia_state; } void VkPipelineObj::SetRasterization(const VkPipelineRasterizationStateCreateInfo *rs_state) { m_rs_state = *rs_state; m_rs_state.pNext = &m_line_state; } void VkPipelineObj::SetTessellation(const VkPipelineTessellationStateCreateInfo *te_state) { m_te_state = te_state; } void VkPipelineObj::SetLineState(const VkPipelineRasterizationLineStateCreateInfoEXT *line_state) { m_line_state = *line_state; } void VkPipelineObj::InitGraphicsPipelineCreateInfo(VkGraphicsPipelineCreateInfo *gp_ci) { gp_ci->stageCount = m_shaderStages.size(); gp_ci->pStages = m_shaderStages.size() ?
m_shaderStages.data() : nullptr; m_vi_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; gp_ci->pVertexInputState = &m_vi_state; m_ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; gp_ci->pInputAssemblyState = &m_ia_state; gp_ci->sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci->pNext = NULL; gp_ci->flags = 0; m_cb_state.attachmentCount = m_colorAttachments.size(); m_cb_state.pAttachments = m_colorAttachments.data(); if (m_viewports.size() > 0) { m_vp_state.viewportCount = m_viewports.size(); m_vp_state.pViewports = m_viewports.data(); } else { MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); } if (m_scissors.size() > 0) { m_vp_state.scissorCount = m_scissors.size(); m_vp_state.pScissors = m_scissors.data(); } else { MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); } memset(&m_pd_state, 0, sizeof(m_pd_state)); if (m_dynamic_state_enables.size() > 0) { m_pd_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; m_pd_state.dynamicStateCount = m_dynamic_state_enables.size(); m_pd_state.pDynamicStates = m_dynamic_state_enables.data(); gp_ci->pDynamicState = &m_pd_state; } gp_ci->subpass = 0; gp_ci->pViewportState = &m_vp_state; gp_ci->pRasterizationState = &m_rs_state; gp_ci->pMultisampleState = &m_ms_state; gp_ci->pDepthStencilState = m_ds_state; gp_ci->pColorBlendState = &m_cb_state; gp_ci->pTessellationState = m_te_state; } VkResult VkPipelineObj::CreateVKPipeline(VkPipelineLayout layout, VkRenderPass render_pass, VkGraphicsPipelineCreateInfo *gp_ci) { VkGraphicsPipelineCreateInfo info = {}; // if not given a CreateInfo, create and initialize a local one. if (gp_ci == nullptr) { gp_ci = &info; InitGraphicsPipelineCreateInfo(gp_ci); } gp_ci->layout = layout; gp_ci->renderPass = render_pass; return init_try(*m_device, *gp_ci); } VkCommandBufferObj::VkCommandBufferObj(VkDeviceObj *device, VkCommandPoolObj *pool, VkCommandBufferLevel level, VkQueueObj *queue) { m_device = device; if (queue) { m_queue = queue; } else { m_queue = m_device->GetDefaultQueue(); } assert(m_queue); auto create_info = vk_testing::CommandBuffer::create_info(pool->handle()); create_info.level = level; init(*device, create_info); } void VkCommandBufferObj::PipelineBarrier(VkPipelineStageFlags src_stages, VkPipelineStageFlags dest_stages, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { vk::CmdPipelineBarrier(handle(), src_stages, dest_stages, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void VkCommandBufferObj::PipelineBarrier2KHR(const VkDependencyInfoKHR *pDependencyInfo) { auto fpCmdPipelineBarrier2KHR = (PFN_vkCmdPipelineBarrier2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdPipelineBarrier2KHR"); assert(fpCmdPipelineBarrier2KHR != nullptr); fpCmdPipelineBarrier2KHR(handle(), pDependencyInfo); } void VkCommandBufferObj::ClearAllBuffers(const vector<std::unique_ptr<VkImageObj>> &color_objs, VkClearColorValue clear_color, VkDepthStencilObj *depth_stencil_obj, float depth_clear_value, uint32_t stencil_clear_value) { // whatever we want to do, we do it to the whole buffer VkImageSubresourceRange subrange = {}; // srRange.aspectMask to be set later subrange.baseMipLevel = 0; // TODO: Mali device crashing with 
VK_REMAINING_MIP_LEVELS subrange.levelCount = 1; // VK_REMAINING_MIP_LEVELS; subrange.baseArrayLayer = 0; // TODO: Mesa crashing with VK_REMAINING_ARRAY_LAYERS subrange.layerCount = 1; // VK_REMAINING_ARRAY_LAYERS; const VkImageLayout clear_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; for (const auto &color_obj : color_objs) { subrange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_obj->Layout(VK_IMAGE_LAYOUT_UNDEFINED); color_obj->SetLayout(this, subrange.aspectMask, clear_layout); ClearColorImage(color_obj->image(), clear_layout, &clear_color, 1, &subrange); } if (depth_stencil_obj && depth_stencil_obj->Initialized()) { subrange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if (FormatIsDepthOnly(depth_stencil_obj->format())) subrange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; if (FormatIsStencilOnly(depth_stencil_obj->format())) subrange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; depth_stencil_obj->Layout(VK_IMAGE_LAYOUT_UNDEFINED); depth_stencil_obj->SetLayout(this, subrange.aspectMask, clear_layout); VkClearDepthStencilValue clear_value = {depth_clear_value, stencil_clear_value}; ClearDepthStencilImage(depth_stencil_obj->handle(), clear_layout, &clear_value, 1, &subrange); } } void VkCommandBufferObj::FillBuffer(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize fill_size, uint32_t data) { vk::CmdFillBuffer(handle(), buffer, offset, fill_size, data); } void VkCommandBufferObj::UpdateBuffer(VkBuffer buffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { vk::CmdUpdateBuffer(handle(), buffer, dstOffset, dataSize, pData); } void VkCommandBufferObj::CopyImage(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { vk::CmdCopyImage(handle(), srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); } void VkCommandBufferObj::ResolveImage(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { vk::CmdResolveImage(handle(), srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); } void VkCommandBufferObj::ClearColorImage(VkImage image, VkImageLayout imageLayout, const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { vk::CmdClearColorImage(handle(), image, imageLayout, pColor, rangeCount, pRanges); } void VkCommandBufferObj::ClearDepthStencilImage(VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { vk::CmdClearDepthStencilImage(handle(), image, imageLayout, pColor, rangeCount, pRanges); } void VkCommandBufferObj::BuildAccelerationStructure(VkAccelerationStructureObj *as, VkBuffer scratchBuffer) { BuildAccelerationStructure(as, scratchBuffer, VK_NULL_HANDLE); } void VkCommandBufferObj::BuildAccelerationStructure(VkAccelerationStructureObj *as, VkBuffer scratchBuffer, VkBuffer instanceData) { PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)vk::GetDeviceProcAddr(as->dev(), "vkCmdBuildAccelerationStructureNV"); assert(vkCmdBuildAccelerationStructureNV != nullptr); vkCmdBuildAccelerationStructureNV(handle(), &as->info(), instanceData, 0, VK_FALSE, as->handle(), VK_NULL_HANDLE, scratchBuffer, 0); } void VkCommandBufferObj::PrepareAttachments(const vector<std::unique_ptr<VkImageObj>> &color_atts, VkDepthStencilObj *depth_stencil_att) { for 
(const auto &color_att : color_atts) { color_att->SetLayout(this, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); } if (depth_stencil_att && depth_stencil_att->Initialized()) { VkImageAspectFlags aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; if (FormatIsDepthOnly(depth_stencil_att->Format())) aspect = VK_IMAGE_ASPECT_DEPTH_BIT; if (FormatIsStencilOnly(depth_stencil_att->Format())) aspect = VK_IMAGE_ASPECT_STENCIL_BIT; depth_stencil_att->SetLayout(this, aspect, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); } } void VkCommandBufferObj::BeginRenderPass(const VkRenderPassBeginInfo &info, VkSubpassContents contents) { vk::CmdBeginRenderPass(handle(), &info, contents); } void VkCommandBufferObj::EndRenderPass() { vk::CmdEndRenderPass(handle()); } void VkCommandBufferObj::SetViewport(uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { vk::CmdSetViewport(handle(), firstViewport, viewportCount, pViewports); } void VkCommandBufferObj::SetStencilReference(VkStencilFaceFlags faceMask, uint32_t reference) { vk::CmdSetStencilReference(handle(), faceMask, reference); } void VkCommandBufferObj::DrawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) { vk::CmdDrawIndexed(handle(), indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); } void VkCommandBufferObj::Draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) { vk::CmdDraw(handle(), vertexCount, instanceCount, firstVertex, firstInstance); } void VkCommandBufferObj::QueueCommandBuffer(bool checkSuccess) { VkFenceObj nullFence; QueueCommandBuffer(nullFence, checkSuccess); } void VkCommandBufferObj::QueueCommandBuffer(const VkFenceObj &fence, bool checkSuccess) { VkResult err = VK_SUCCESS; err = m_queue->submit(*this, fence, checkSuccess); if (checkSuccess) { ASSERT_VK_SUCCESS(err); } err = m_queue->wait(); if (checkSuccess) { ASSERT_VK_SUCCESS(err); } // TODO: Determine if we really want this serialization here // Wait for work to finish before cleaning up. 
vk::DeviceWaitIdle(m_device->device()); } void VkCommandBufferObj::BindDescriptorSet(VkDescriptorSetObj &descriptorSet) { VkDescriptorSet set_obj = descriptorSet.GetDescriptorSetHandle(); // bind pipeline, vertex buffer (descriptor set) and WVP (dynamic buffer view) if (set_obj) { vk::CmdBindDescriptorSets(handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, descriptorSet.GetPipelineLayout(), 0, 1, &set_obj, 0, NULL); } } void VkCommandBufferObj::BindIndexBuffer(VkBufferObj *indexBuffer, VkDeviceSize offset, VkIndexType indexType) { vk::CmdBindIndexBuffer(handle(), indexBuffer->handle(), offset, indexType); } void VkCommandBufferObj::BindVertexBuffer(VkConstantBufferObj *vertexBuffer, VkDeviceSize offset, uint32_t binding) { vk::CmdBindVertexBuffers(handle(), binding, 1, &vertexBuffer->handle(), &offset); } VkCommandPoolObj::VkCommandPoolObj(VkDeviceObj *device, uint32_t queue_family_index, VkCommandPoolCreateFlags flags) { init(*device, vk_testing::CommandPool::create_info(queue_family_index, flags)); } bool VkDepthStencilObj::Initialized() { return m_initialized; } VkDepthStencilObj::VkDepthStencilObj(VkDeviceObj *device) : VkImageObj(device) { m_initialized = false; } VkImageView *VkDepthStencilObj::BindInfo() { return &m_attachmentBindInfo; } VkFormat VkDepthStencilObj::Format() const { return this->m_depth_stencil_fmt; } void VkDepthStencilObj::Init(VkDeviceObj *device, int32_t width, int32_t height, VkFormat format, VkImageUsageFlags usage, VkImageAspectFlags aspect) { VkImageViewCreateInfo view_info = {}; m_device = device; m_initialized = true; m_depth_stencil_fmt = format; /* create image */ VkImageObj::Init(width, height, 1, m_depth_stencil_fmt, usage, VK_IMAGE_TILING_OPTIMAL); // allows for overriding by caller if (aspect == 0) { aspect = VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; if (FormatIsDepthOnly(format)) aspect = VK_IMAGE_ASPECT_DEPTH_BIT; else if (FormatIsStencilOnly(format)) aspect = VK_IMAGE_ASPECT_STENCIL_BIT; } SetLayout(aspect, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.image = VK_NULL_HANDLE; view_info.subresourceRange.aspectMask = aspect; view_info.subresourceRange.baseMipLevel = 0; view_info.subresourceRange.levelCount = 1; view_info.subresourceRange.baseArrayLayer = 0; view_info.subresourceRange.layerCount = 1; view_info.flags = 0; view_info.format = m_depth_stencil_fmt; view_info.image = handle(); view_info.viewType = VK_IMAGE_VIEW_TYPE_2D; m_imageView.init(*m_device, view_info); m_attachmentBindInfo = m_imageView.handle(); }
1
21,242
I think a `std::copy` with `std::back_inserter` will get this done in one call.
KhronosGroup-Vulkan-ValidationLayers
cpp
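A minimal sketch of the reviewer's `std::copy` / `std::back_inserter` suggestion. The vectors and element type below are illustrative placeholders, not taken from the patch under review; the point is only that appending one container to another can be a single algorithm call instead of a hand-written loop.

```cpp
#include <algorithm>
#include <iterator>
#include <vector>

int main() {
    std::vector<int> src{1, 2, 3};
    std::vector<int> dst{0};

    // Equivalent to: for (int v : src) dst.push_back(v);
    // back_inserter turns push_back into an output iterator, so std::copy
    // grows dst as it writes, completing the whole append in one call.
    std::copy(src.begin(), src.end(), std::back_inserter(dst));

    return static_cast<int>(dst.size()); // dst now holds {0, 1, 2, 3}
}
```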
@@ -47,6 +47,11 @@ type cryptoSetupServer struct { var _ CryptoSetup = &cryptoSetupServer{} +// ErrHOLExperiment is returned when the client sends the FHL2 tag in the CHLO +// this is an expiremnt implemented by Chrome in QUIC 36, which we don't support +// TODO: remove this when dropping support for QUIC 36 +var ErrHOLExperiment = qerr.Error(qerr.InvalidCryptoMessageParameter, "HOL experiment. Unsupported") + // NewCryptoSetup creates a new CryptoSetup instance for a server func NewCryptoSetup( connID protocol.ConnectionID,
1
package handshake import ( "bytes" "crypto/rand" "encoding/binary" "errors" "io" "sync" "github.com/lucas-clemente/quic-go/crypto" "github.com/lucas-clemente/quic-go/protocol" "github.com/lucas-clemente/quic-go/qerr" "github.com/lucas-clemente/quic-go/utils" ) // KeyDerivationFunction is used for key derivation type KeyDerivationFunction func(forwardSecure bool, sharedSecret, nonces []byte, connID protocol.ConnectionID, chlo []byte, scfg []byte, cert []byte, divNonce []byte, pers protocol.Perspective) (crypto.AEAD, error) // KeyExchangeFunction is used to make a new KEX type KeyExchangeFunction func() crypto.KeyExchange // The CryptoSetupServer handles all things crypto for the Session type cryptoSetupServer struct { connID protocol.ConnectionID sourceAddr []byte version protocol.VersionNumber scfg *ServerConfig diversificationNonce []byte secureAEAD crypto.AEAD forwardSecureAEAD crypto.AEAD receivedForwardSecurePacket bool sentSHLO bool receivedSecurePacket bool aeadChanged chan protocol.EncryptionLevel keyDerivation KeyDerivationFunction keyExchange KeyExchangeFunction cryptoStream io.ReadWriter connectionParameters ConnectionParametersManager mutex sync.RWMutex } var _ CryptoSetup = &cryptoSetupServer{} // NewCryptoSetup creates a new CryptoSetup instance for a server func NewCryptoSetup( connID protocol.ConnectionID, sourceAddr []byte, version protocol.VersionNumber, scfg *ServerConfig, cryptoStream io.ReadWriter, connectionParametersManager ConnectionParametersManager, aeadChanged chan protocol.EncryptionLevel, ) (CryptoSetup, error) { return &cryptoSetupServer{ connID: connID, sourceAddr: sourceAddr, version: version, scfg: scfg, keyDerivation: crypto.DeriveKeysAESGCM, keyExchange: getEphermalKEX, cryptoStream: cryptoStream, connectionParameters: connectionParametersManager, aeadChanged: aeadChanged, }, nil } // HandleCryptoStream reads and writes messages on the crypto stream func (h *cryptoSetupServer) HandleCryptoStream() error { for { var chloData bytes.Buffer messageTag, cryptoData, err := ParseHandshakeMessage(io.TeeReader(h.cryptoStream, &chloData)) if err != nil { return qerr.HandshakeFailed } if messageTag != TagCHLO { return qerr.InvalidCryptoMessageType } utils.Debugf("Got CHLO:\n%s", printHandshakeMessage(cryptoData)) done, err := h.handleMessage(chloData.Bytes(), cryptoData) if err != nil { return err } if done { return nil } } } func (h *cryptoSetupServer) handleMessage(chloData []byte, cryptoData map[Tag][]byte) (bool, error) { sniSlice, ok := cryptoData[TagSNI] if !ok { return false, qerr.Error(qerr.CryptoMessageParameterNotFound, "SNI required") } sni := string(sniSlice) if sni == "" { return false, qerr.Error(qerr.CryptoMessageParameterNotFound, "SNI required") } // prevent version downgrade attacks // see https://groups.google.com/a/chromium.org/forum/#!topic/proto-quic/N-de9j63tCk for a discussion and examples verSlice, ok := cryptoData[TagVER] if !ok { return false, qerr.Error(qerr.InvalidCryptoMessageParameter, "client hello missing version tag") } if len(verSlice) != 4 { return false, qerr.Error(qerr.InvalidCryptoMessageParameter, "incorrect version tag") } verTag := binary.LittleEndian.Uint32(verSlice) ver := protocol.VersionTagToNumber(verTag) // If the client's preferred version is not the version we are currently speaking, then the client went through a version negotiation. In this case, we need to make sure that we actually do not support this version and that it wasn't a downgrade attack. 
if ver != h.version && protocol.IsSupportedVersion(ver) { return false, qerr.Error(qerr.VersionNegotiationMismatch, "Downgrade attack detected") } var reply []byte var err error certUncompressed, err := h.scfg.certChain.GetLeafCert(sni) if err != nil { return false, err } if !h.isInchoateCHLO(cryptoData, certUncompressed) { // We have a CHLO with a proper server config ID, do a 0-RTT handshake reply, err = h.handleCHLO(sni, chloData, cryptoData) if err != nil { return false, err } _, err = h.cryptoStream.Write(reply) if err != nil { return false, err } return true, nil } // We have an inchoate or non-matching CHLO, we now send a rejection reply, err = h.handleInchoateCHLO(sni, chloData, cryptoData) if err != nil { return false, err } _, err = h.cryptoStream.Write(reply) if err != nil { return false, err } return false, nil } // Open a message func (h *cryptoSetupServer) Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, protocol.EncryptionLevel, error) { h.mutex.RLock() defer h.mutex.RUnlock() if h.forwardSecureAEAD != nil { res, err := h.forwardSecureAEAD.Open(dst, src, packetNumber, associatedData) if err == nil { h.receivedForwardSecurePacket = true return res, protocol.EncryptionForwardSecure, nil } if h.receivedForwardSecurePacket { return nil, protocol.EncryptionUnspecified, err } } if h.secureAEAD != nil { res, err := h.secureAEAD.Open(dst, src, packetNumber, associatedData) if err == nil { h.receivedSecurePacket = true return res, protocol.EncryptionSecure, nil } if h.receivedSecurePacket { return nil, protocol.EncryptionUnspecified, err } } nullAEAD := &crypto.NullAEAD{} res, err := nullAEAD.Open(dst, src, packetNumber, associatedData) if err != nil { return res, protocol.EncryptionUnspecified, err } return res, protocol.EncryptionUnencrypted, err } // Seal a message, call LockForSealing() before! 
func (h *cryptoSetupServer) Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, protocol.EncryptionLevel) { if h.forwardSecureAEAD != nil && h.sentSHLO { return h.forwardSecureAEAD.Seal(dst, src, packetNumber, associatedData), protocol.EncryptionForwardSecure } else if h.secureAEAD != nil { // secureAEAD and forwardSecureAEAD are created at the same time (when receiving the CHLO) // make sure that the SHLO isn't sent forward-secure h.sentSHLO = true return h.secureAEAD.Seal(dst, src, packetNumber, associatedData), protocol.EncryptionSecure } else { return (&crypto.NullAEAD{}).Seal(dst, src, packetNumber, associatedData), protocol.EncryptionUnencrypted } } func (h *cryptoSetupServer) SealWith(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte, forceEncryptionLevel protocol.EncryptionLevel) ([]byte, protocol.EncryptionLevel, error) { switch forceEncryptionLevel { case protocol.EncryptionUnencrypted: return (&crypto.NullAEAD{}).Seal(dst, src, packetNumber, associatedData), protocol.EncryptionUnencrypted, nil case protocol.EncryptionSecure: if h.secureAEAD == nil { return nil, protocol.EncryptionUnspecified, errors.New("CryptoSetupServer: no secureAEAD") } return h.secureAEAD.Seal(dst, src, packetNumber, associatedData), protocol.EncryptionSecure, nil case protocol.EncryptionForwardSecure: if h.forwardSecureAEAD == nil { return nil, protocol.EncryptionUnspecified, errors.New("CryptoSetupServer: no forwardSecureAEAD") } return h.forwardSecureAEAD.Seal(dst, src, packetNumber, associatedData), protocol.EncryptionForwardSecure, nil } return nil, protocol.EncryptionUnspecified, errors.New("no encryption level specified") } func (h *cryptoSetupServer) isInchoateCHLO(cryptoData map[Tag][]byte, cert []byte) bool { if _, ok := cryptoData[TagPUBS]; !ok { return true } scid, ok := cryptoData[TagSCID] if !ok || !bytes.Equal(h.scfg.ID, scid) { return true } xlctTag, ok := cryptoData[TagXLCT] if !ok || len(xlctTag) != 8 { return true } xlct := binary.LittleEndian.Uint64(xlctTag) if crypto.HashCert(cert) != xlct { return true } if err := h.scfg.stkSource.VerifyToken(h.sourceAddr, cryptoData[TagSTK]); err != nil { utils.Infof("STK invalid: %s", err.Error()) return true } return false } func (h *cryptoSetupServer) handleInchoateCHLO(sni string, chlo []byte, cryptoData map[Tag][]byte) ([]byte, error) { if len(chlo) < protocol.ClientHelloMinimumSize { return nil, qerr.Error(qerr.CryptoInvalidValueLength, "CHLO too small") } token, err := h.scfg.stkSource.NewToken(h.sourceAddr) if err != nil { return nil, err } replyMap := map[Tag][]byte{ TagSCFG: h.scfg.Get(), TagSTK: token, TagSVID: []byte("quic-go"), } if h.scfg.stkSource.VerifyToken(h.sourceAddr, cryptoData[TagSTK]) == nil { proof, err := h.scfg.Sign(sni, chlo) if err != nil { return nil, err } commonSetHashes := cryptoData[TagCCS] cachedCertsHashes := cryptoData[TagCCRT] certCompressed, err := h.scfg.GetCertsCompressed(sni, commonSetHashes, cachedCertsHashes) if err != nil { return nil, err } // Token was valid, send more details replyMap[TagPROF] = proof replyMap[TagCERT] = certCompressed } var serverReply bytes.Buffer WriteHandshakeMessage(&serverReply, TagREJ, replyMap) utils.Debugf("Sending REJ:\n%s", printHandshakeMessage(replyMap)) return serverReply.Bytes(), nil } func (h *cryptoSetupServer) handleCHLO(sni string, data []byte, cryptoData map[Tag][]byte) ([]byte, error) { // We have a CHLO matching our server config, we can continue with the 0-RTT handshake sharedSecret, err := 
h.scfg.kex.CalculateSharedKey(cryptoData[TagPUBS]) if err != nil { return nil, err } h.mutex.Lock() defer h.mutex.Unlock() certUncompressed, err := h.scfg.certChain.GetLeafCert(sni) if err != nil { return nil, err } serverNonce := make([]byte, 32) if _, err = rand.Read(serverNonce); err != nil { return nil, err } h.diversificationNonce = make([]byte, 32) if _, err = rand.Read(h.diversificationNonce); err != nil { return nil, err } clientNonce := cryptoData[TagNONC] err = h.validateClientNonce(clientNonce) if err != nil { return nil, err } aead := cryptoData[TagAEAD] if !bytes.Equal(aead, []byte("AESG")) { return nil, qerr.Error(qerr.CryptoNoSupport, "Unsupported AEAD or KEXS") } kexs := cryptoData[TagKEXS] if !bytes.Equal(kexs, []byte("C255")) { return nil, qerr.Error(qerr.CryptoNoSupport, "Unsupported AEAD or KEXS") } h.secureAEAD, err = h.keyDerivation( false, sharedSecret, clientNonce, h.connID, data, h.scfg.Get(), certUncompressed, h.diversificationNonce, protocol.PerspectiveServer, ) if err != nil { return nil, err } h.aeadChanged <- protocol.EncryptionSecure // Generate a new curve instance to derive the forward secure key var fsNonce bytes.Buffer fsNonce.Write(clientNonce) fsNonce.Write(serverNonce) ephermalKex := h.keyExchange() ephermalSharedSecret, err := ephermalKex.CalculateSharedKey(cryptoData[TagPUBS]) if err != nil { return nil, err } h.forwardSecureAEAD, err = h.keyDerivation( true, ephermalSharedSecret, fsNonce.Bytes(), h.connID, data, h.scfg.Get(), certUncompressed, nil, protocol.PerspectiveServer, ) if err != nil { return nil, err } err = h.connectionParameters.SetFromMap(cryptoData) if err != nil { return nil, err } replyMap, err := h.connectionParameters.GetHelloMap() if err != nil { return nil, err } // add crypto parameters replyMap[TagPUBS] = ephermalKex.PublicKey() replyMap[TagSNO] = serverNonce replyMap[TagVER] = protocol.SupportedVersionsAsTags // note that the SHLO *has* to fit into one packet var reply bytes.Buffer WriteHandshakeMessage(&reply, TagSHLO, replyMap) utils.Debugf("Sending SHLO:\n%s", printHandshakeMessage(replyMap)) h.aeadChanged <- protocol.EncryptionForwardSecure return reply.Bytes(), nil } // DiversificationNonce returns a diversification nonce if required in the next packet to be Seal'ed. See LockForSealing()! func (h *cryptoSetupServer) DiversificationNonce(force bool) []byte { if force || (h.secureAEAD != nil && !h.sentSHLO) { return h.diversificationNonce } return nil } func (h *cryptoSetupServer) SetDiversificationNonce(data []byte) error { panic("not needed for cryptoSetupServer") } // LockForSealing should be called before Seal(). It is needed so that diversification nonces can be obtained before packets are sealed, and the AEADs are not changed in the meantime. func (h *cryptoSetupServer) LockForSealing() { h.mutex.RLock() } // UnlockForSealing should be called after Seal() is complete, see LockForSealing(). func (h *cryptoSetupServer) UnlockForSealing() { h.mutex.RUnlock() } // HandshakeComplete returns true after the first forward secure packet was received form the client. func (h *cryptoSetupServer) HandshakeComplete() bool { return h.receivedForwardSecurePacket } func (h *cryptoSetupServer) validateClientNonce(nonce []byte) error { if len(nonce) != 32 { return qerr.Error(qerr.InvalidCryptoMessageParameter, "invalid client nonce length") } if !bytes.Equal(nonce[4:12], h.scfg.obit) { return qerr.Error(qerr.InvalidCryptoMessageParameter, "OBIT not matching") } return nil }
1
5,773
In the future: s/QUIC 36/Version36/ to make grepping easier
lucas-clemente-quic-go
go
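A hedged sketch of how the newly declared ErrHOLExperiment might be returned when the CHLO carries the experiment tag. The TagFHL2 constant, the helper name, and the stand-in types below are assumptions made for illustration; the actual check is not part of the diff shown above.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the real handshake types so the sketch runs on its own.
type Tag string

const TagFHL2 Tag = "FHL2" // assumed name for Chrome's HOL-experiment tag

var ErrHOLExperiment = errors.New("HOL experiment. Unsupported")

// rejectHOLExperiment shows the shape of the early-out that handleMessage
// could perform before processing the rest of the CHLO.
func rejectHOLExperiment(cryptoData map[Tag][]byte) error {
	if _, ok := cryptoData[TagFHL2]; ok {
		return ErrHOLExperiment
	}
	return nil
}

func main() {
	chlo := map[Tag][]byte{TagFHL2: nil, "SNI": []byte("example.org")}
	fmt.Println(rejectHOLExperiment(chlo)) // HOL experiment. Unsupported
}
```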
@@ -29,11 +29,13 @@ class FileCard extends Component { } } - tempStoreMeta = (ev, name) => { + tempStoreMeta = (ev, name, type) => { + var value = ev.target.value + if (type === 'checkbox') value = ev.target.checked ? 'on' : 'off' this.setState({ formState: { ...this.state.formState, - [name]: ev.target.value + [name]: value } }) }
1
const { h, Component } = require('preact') const getFileTypeIcon = require('../../utils/getFileTypeIcon') const ignoreEvent = require('../../utils/ignoreEvent.js') const FilePreview = require('../FilePreview') class FileCard extends Component { constructor (props) { super(props) const file = this.props.files[this.props.fileCardFor] const metaFields = this.props.metaFields || [] const storedMetaData = {} metaFields.forEach((field) => { storedMetaData[field.id] = file.meta[field.id] || '' }) this.state = { formState: storedMetaData } } saveOnEnter = (ev) => { if (ev.keyCode === 13) { ev.stopPropagation() ev.preventDefault() const file = this.props.files[this.props.fileCardFor] this.props.saveFileCard(this.state.formState, file.id) } } tempStoreMeta = (ev, name) => { this.setState({ formState: { ...this.state.formState, [name]: ev.target.value } }) } handleSave = () => { const fileID = this.props.fileCardFor this.props.saveFileCard(this.state.formState, fileID) } handleCancel = () => { this.props.toggleFileCard() } renderMetaFields = () => { const metaFields = this.props.metaFields || [] return metaFields.map((field) => { const id = `uppy-Dashboard-FileCard-input-${field.id}` return ( <fieldset key={field.id} class="uppy-Dashboard-FileCard-fieldset"> <label class="uppy-Dashboard-FileCard-label" for={id}>{field.name}</label> <input class="uppy-u-reset uppy-c-textInput uppy-Dashboard-FileCard-input" id={id} type="text" value={this.state.formState[field.id]} placeholder={field.placeholder} onkeyup={this.saveOnEnter} onkeydown={this.saveOnEnter} onkeypress={this.saveOnEnter} oninput={ev => this.tempStoreMeta(ev, field.id)} data-uppy-super-focusable /> </fieldset> ) }) } render () { const file = this.props.files[this.props.fileCardFor] return ( <div class="uppy-Dashboard-FileCard" data-uppy-panelType="FileCard" onDragOver={ignoreEvent} onDragLeave={ignoreEvent} onDrop={ignoreEvent} onPaste={ignoreEvent} > <div class="uppy-DashboardContent-bar"> <div class="uppy-DashboardContent-title" role="heading" aria-level="1"> {this.props.i18nArray('editing', { file: <span class="uppy-DashboardContent-titleFile">{file.meta ? file.meta.name : file.name}</span> })} </div> <button class="uppy-DashboardContent-back" type="button" title={this.props.i18n('finishEditingFile')} onclick={this.handleSave} > {this.props.i18n('done')} </button> </div> <div class="uppy-Dashboard-FileCard-inner"> <div class="uppy-Dashboard-FileCard-preview" style={{ backgroundColor: getFileTypeIcon(file.type).color }}> <FilePreview file={file} /> </div> <div class="uppy-Dashboard-FileCard-info"> {this.renderMetaFields()} </div> <div class="uppy-Dashboard-FileCard-actions"> <button class="uppy-u-reset uppy-c-btn uppy-c-btn-primary uppy-Dashboard-FileCard-actionsBtn" type="button" onclick={this.handleSave} > {this.props.i18n('saveChanges')} </button> <button class="uppy-u-reset uppy-c-btn uppy-c-btn-link uppy-Dashboard-FileCard-actionsBtn" type="button" onclick={this.handleCancel} > {this.props.i18n('cancel')} </button> </div> </div> </div> ) } } module.exports = FileCard
1
12,922
This is an example of why I prefer a flexible custom `render()` solution: 'on'/'off' may not be the right value for every application. You have to make a lot of decisions even for very simple form fields :(
transloadit-uppy
js
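A rough sketch of the flexible per-field `render()` idea the reviewer mentions, under the assumption that such an option existed on `metaFields` entries (it is not part of the code above). With it, the application decides how a checkbox maps to a stored value instead of the plugin hard-coding 'on'/'off'.

```js
// Hypothetical metaFields entry: the plugin would call `render` instead of
// emitting its default <input>, passing the current value, an onChange
// callback, and Preact's `h` helper.
const metaFields = [
  {
    id: 'public',
    name: 'Public',
    render ({ value, onChange }, h) {
      return h('input', {
        type: 'checkbox',
        checked: value === true,
        // The app chooses the stored representation (a boolean here),
        // rather than the plugin forcing 'on'/'off'.
        onchange: (ev) => onChange(ev.target.checked),
      })
    },
  },
]

module.exports = metaFields
```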
@@ -568,7 +568,6 @@ func (data *TestData) deployAntreaIPSec() error { func (data *TestData) deployAntreaFlowExporter(ipfixCollector string) error { // Enable flow exporter feature and add related config params to antrea agent configmap. ac := []configChange{ - {"FlowExporter", "true", true}, {"flowPollInterval", "\"1s\"", false}, {"activeFlowExportTimeout", fmt.Sprintf("\"%v\"", exporterActiveFlowExportTimeout), false}, {"idleFlowExportTimeout", fmt.Sprintf("\"%v\"", exporterIdleFlowExportTimeout), false},
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package e2e import ( "bytes" "context" "encoding/json" "fmt" "math/rand" "net" "os" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/containernetworking/plugins/pkg/ip" "golang.org/x/mod/semver" "gopkg.in/yaml.v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/remotecommand" "k8s.io/component-base/featuregate" aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" "antrea.io/antrea/pkg/agent/config" crdclientset "antrea.io/antrea/pkg/client/clientset/versioned" "antrea.io/antrea/pkg/features" legacycrdclientset "antrea.io/antrea/pkg/legacyclient/clientset/versioned" "antrea.io/antrea/test/e2e/providers" ) const ( defaultTimeout = 90 * time.Second defaultInterval = 1 * time.Second // antreaNamespace is the K8s Namespace in which all Antrea resources are running. 
antreaNamespace string = "kube-system" flowAggregatorNamespace string = "flow-aggregator" antreaConfigVolume string = "antrea-config" flowAggregatorConfigVolume string = "flow-aggregator-config" antreaDaemonSet string = "antrea-agent" antreaWindowsDaemonSet string = "antrea-agent-windows" antreaDeployment string = "antrea-controller" flowAggregatorDeployment string = "flow-aggregator" antreaDefaultGW string = "antrea-gw0" testNamespace string = "antrea-test" busyboxContainerName string = "busybox" agnhostContainerName string = "agnhost" controllerContainerName string = "antrea-controller" ovsContainerName string = "antrea-ovs" agentContainerName string = "antrea-agent" antreaYML string = "antrea.yml" antreaIPSecYML string = "antrea-ipsec.yml" antreaCovYML string = "antrea-coverage.yml" antreaIPSecCovYML string = "antrea-ipsec-coverage.yml" flowAggregatorYML string = "flow-aggregator.yml" flowAggregatorCovYML string = "flow-aggregator-coverage.yml" defaultBridgeName string = "br-int" monitoringNamespace string = "monitoring" antreaControllerCovBinary string = "antrea-controller-coverage" antreaAgentCovBinary string = "antrea-agent-coverage" flowAggregatorCovBinary string = "flow-aggregator-coverage" antreaControllerCovFile string = "antrea-controller.cov.out" antreaAgentCovFile string = "antrea-agent.cov.out" flowAggregatorCovFile string = "flow-aggregator.cov.out" antreaAgentConfName string = "antrea-agent.conf" antreaControllerConfName string = "antrea-controller.conf" flowAggregatorConfName string = "flow-aggregator.conf" nameSuffixLength int = 8 agnhostImage = "projects.registry.vmware.com/antrea/agnhost:2.26" busyboxImage = "projects.registry.vmware.com/library/busybox" nginxImage = "projects.registry.vmware.com/antrea/nginx" perftoolImage = "projects.registry.vmware.com/antrea/perftool" ipfixCollectorImage = "projects.registry.vmware.com/antrea/ipfix-collector:v0.5.3" ipfixCollectorPort = "4739" nginxLBService = "nginx-loadbalancer" exporterActiveFlowExportTimeout = 2 * time.Second exporterIdleFlowExportTimeout = 1 * time.Second aggregatorActiveFlowRecordTimeout = 3500 * time.Millisecond aggregatorInactiveFlowRecordTimeout = 6 * time.Second ) type ClusterNode struct { idx int // 0 for control-plane Node name string ip string podV4NetworkCIDR string podV6NetworkCIDR string gwV4Addr string gwV6Addr string os string } type ClusterInfo struct { numNodes int podV4NetworkCIDR string podV6NetworkCIDR string svcV4NetworkCIDR string svcV6NetworkCIDR string controlPlaneNodeName string controlPlaneNodeIP string nodes map[int]ClusterNode nodesOS map[string]string windowsNodes []int k8sServerVersion string } var clusterInfo ClusterInfo type TestOptions struct { providerName string providerConfigPath string logsExportDir string logsExportOnSuccess bool withBench bool enableCoverage bool coverageDir string } var testOptions TestOptions var provider providers.ProviderInterface // podInfo combines OS info with a Pod name. It is useful when choosing commands and options on Pods of different OS (Windows, Linux). type podInfo struct { name string os string } // TestData stores the state required for each test case. 
type TestData struct { kubeConfig *restclient.Config clientset kubernetes.Interface aggregatorClient aggregatorclientset.Interface crdClient crdclientset.Interface legacyCrdClient legacycrdclientset.Interface logsDirForTestCase string } type configChange struct { field string value string isFeatureGate bool } var testData *TestData type PodIPs struct { ipv4 *net.IP ipv6 *net.IP ipStrings []string } func (p PodIPs) String() string { res := "" if p.ipv4 != nil { res += fmt.Sprintf("IPv4(%s),", p.ipv4.String()) } if p.ipv6 != nil { res += fmt.Sprintf("IPv6(%s),", p.ipv6.String()) } return fmt.Sprintf("%sIPstrings(%s)", res, strings.Join(p.ipStrings, ",")) } func (p *PodIPs) hasSameIP(p1 *PodIPs) bool { if len(p.ipStrings) == 0 && len(p1.ipStrings) == 0 { return true } if p.ipv4 != nil && p1.ipv4 != nil && p.ipv4.Equal(*(p1.ipv4)) { return true } if p.ipv6 != nil && p1.ipv6 != nil && p.ipv6.Equal(*(p1.ipv6)) { return true } return false } // workerNodeName returns an empty string if there is no worker Node with the provided idx // (including if idx is 0, which is reserved for the control-plane Node) func workerNodeName(idx int) string { if idx == 0 { // control-plane Node return "" } if node, ok := clusterInfo.nodes[idx]; !ok { return "" } else { return node.name } } // workerNodeIP returns an empty string if there is no worker Node with the provided idx // (including if idx is 0, which is reserved for the control-plane Node) func workerNodeIP(idx int) string { if idx == 0 { // control-plane Node return "" } if node, ok := clusterInfo.nodes[idx]; !ok { return "" } else { return node.ip } } // nodeGatewayIPs returns the Antrea gateway's IPv4 address and IPv6 address for the provided Node // (if applicable), in that order. func nodeGatewayIPs(idx int) (string, string) { if node, ok := clusterInfo.nodes[idx]; !ok { return "", "" } else { return node.gwV4Addr, node.gwV6Addr } } func controlPlaneNodeName() string { return clusterInfo.controlPlaneNodeName } func controlPlaneNodeIP() string { return clusterInfo.controlPlaneNodeIP } // nodeName returns an empty string if there is no Node with the provided idx. If idx is 0, the name // of the control-plane Node will be returned. func nodeName(idx int) string { if node, ok := clusterInfo.nodes[idx]; !ok { return "" } else { return node.name } } // nodeIP returns an empty string if there is no Node with the provided idx. If idx is 0, the IP // of the control-plane Node will be returned. 
func nodeIP(idx int) string { if node, ok := clusterInfo.nodes[idx]; !ok { return "" } else { return node.ip } } func labelNodeRoleControlPlane() string { // TODO: return labelNodeRoleControlPlane unconditionally when the min K8s version // requirement to run Antrea becomes K8s v1.20 const labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane" const labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // If clusterInfo.k8sServerVersion < "v1.20.0" if semver.Compare(clusterInfo.k8sServerVersion, "v1.20.0") < 0 { return labelNodeRoleOldControlPlane } return labelNodeRoleControlPlane } func controlPlaneNoScheduleToleration() corev1.Toleration { // the Node taint still uses "master" in K8s v1.20 return corev1.Toleration{ Key: "node-role.kubernetes.io/master", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule, } } func initProvider() error { providerFactory := map[string]func(string) (providers.ProviderInterface, error){ "vagrant": providers.NewVagrantProvider, "kind": providers.NewKindProvider, "remote": providers.NewRemoteProvider, } if fn, ok := providerFactory[testOptions.providerName]; ok { if newProvider, err := fn(testOptions.providerConfigPath); err != nil { return err } else { provider = newProvider } } else { return fmt.Errorf("unknown provider '%s'", testOptions.providerName) } return nil } // RunCommandOnNode is a convenience wrapper around the Provider interface RunCommandOnNode method. func RunCommandOnNode(nodeName string, cmd string) (code int, stdout string, stderr string, err error) { return provider.RunCommandOnNode(nodeName, cmd) } func collectClusterInfo() error { // retrieve Node information nodes, err := testData.clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return fmt.Errorf("error when listing cluster Nodes: %v", err) } workerIdx := 1 clusterInfo.nodes = make(map[int]ClusterNode) clusterInfo.nodesOS = make(map[string]string) for _, node := range nodes.Items { isControlPlaneNode := func() bool { _, ok := node.Labels[labelNodeRoleControlPlane()] return ok }() var nodeIP string for _, address := range node.Status.Addresses { if address.Type == corev1.NodeInternalIP { nodeIP = address.Address break } } var nodeIdx int // If multiple control-plane Nodes (HA), we will select the last one in the list if isControlPlaneNode { nodeIdx = 0 clusterInfo.controlPlaneNodeName = node.Name clusterInfo.controlPlaneNodeIP = nodeIP } else { nodeIdx = workerIdx workerIdx++ } var podV4NetworkCIDR, podV6NetworkCIDR string var gwV4Addr, gwV6Addr string processPodCIDR := func(podCIDR string) error { _, cidr, err := net.ParseCIDR(podCIDR) if err != nil { return err } if cidr.IP.To4() != nil { podV4NetworkCIDR = podCIDR gwV4Addr = ip.NextIP(cidr.IP).String() } else { podV6NetworkCIDR = podCIDR gwV6Addr = ip.NextIP(cidr.IP).String() } return nil } if len(node.Spec.PodCIDRs) == 0 { if err := processPodCIDR(node.Spec.PodCIDR); err != nil { return fmt.Errorf("error when processing PodCIDR field for Node %s: %v", node.Name, err) } } else { for _, podCIDR := range node.Spec.PodCIDRs { if err := processPodCIDR(podCIDR); err != nil { return fmt.Errorf("error when processing PodCIDRs field for Node %s: %v", node.Name, err) } } } clusterInfo.nodes[nodeIdx] = ClusterNode{ idx: nodeIdx, name: node.Name, ip: nodeIP, podV4NetworkCIDR: podV4NetworkCIDR, podV6NetworkCIDR: podV6NetworkCIDR, gwV4Addr: gwV4Addr, gwV6Addr: gwV6Addr, os: node.Status.NodeInfo.OperatingSystem, } if node.Status.NodeInfo.OperatingSystem == "windows" { 
clusterInfo.windowsNodes = append(clusterInfo.windowsNodes, nodeIdx) } clusterInfo.nodesOS[node.Name] = node.Status.NodeInfo.OperatingSystem } if clusterInfo.controlPlaneNodeName == "" { return fmt.Errorf("error when listing cluster Nodes: control-plane Node not found") } clusterInfo.numNodes = workerIdx retrieveCIDRs := func(cmd string, reg string) ([]string, error) { res := make([]string, 2) rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return res, fmt.Errorf("error when running the following command `%s` on control-plane Node: %v, %s", cmd, err, stdout) } re := regexp.MustCompile(reg) if matches := re.FindStringSubmatch(stdout); len(matches) == 0 { return res, fmt.Errorf("cannot retrieve CIDR, unexpected kubectl output: %s", stdout) } else { cidrs := strings.Split(matches[1], ",") if len(cidrs) == 1 { _, cidr, err := net.ParseCIDR(cidrs[0]) if err != nil { return res, fmt.Errorf("CIDR cannot be parsed: %s", cidrs[0]) } if cidr.IP.To4() != nil { res[0] = cidrs[0] } else { res[1] = cidrs[0] } } else if len(cidrs) == 2 { _, cidr, err := net.ParseCIDR(cidrs[0]) if err != nil { return res, fmt.Errorf("CIDR cannot be parsed: %s", cidrs[0]) } if cidr.IP.To4() != nil { res[0] = cidrs[0] res[1] = cidrs[1] } else { res[0] = cidrs[1] res[1] = cidrs[0] } } else { return res, fmt.Errorf("unexpected cluster CIDR: %s", matches[1]) } } return res, nil } // retrieve cluster CIDRs podCIDRs, err := retrieveCIDRs("kubectl cluster-info dump | grep cluster-cidr", `cluster-cidr=([^"]+)`) if err != nil { return err } clusterInfo.podV4NetworkCIDR = podCIDRs[0] clusterInfo.podV6NetworkCIDR = podCIDRs[1] // retrieve service CIDRs svcCIDRs, err := retrieveCIDRs("kubectl cluster-info dump | grep service-cluster-ip-range", `service-cluster-ip-range=([^"]+)`) if err != nil { return err } clusterInfo.svcV4NetworkCIDR = svcCIDRs[0] clusterInfo.svcV6NetworkCIDR = svcCIDRs[1] // retrieve K8s server version serverVersion, err := testData.clientset.Discovery().ServerVersion() if err != nil { return err } clusterInfo.k8sServerVersion = serverVersion.String() return nil } // createNamespace creates the provided namespace. func (data *TestData) createNamespace(namespace string) error { ns := corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, }, } if ns, err := data.clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}); err != nil { // Ignore error if the namespace already exists if !errors.IsAlreadyExists(err) { return fmt.Errorf("error when creating '%s' Namespace: %v", namespace, err) } // When namespace already exists, check phase if ns.Status.Phase == corev1.NamespaceTerminating { return fmt.Errorf("error when creating '%s' Namespace: namespace exists but is in 'Terminating' phase", namespace) } } return nil } // createTestNamespace creates the namespace used for tests. func (data *TestData) createTestNamespace() error { return data.createNamespace(testNamespace) } // deleteNamespace deletes the provided namespace and waits for deletion to actually complete. 
func (data *TestData) deleteNamespace(namespace string, timeout time.Duration) error { var gracePeriodSeconds int64 = 0 var propagationPolicy = metav1.DeletePropagationForeground deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &propagationPolicy, } if err := data.clientset.CoreV1().Namespaces().Delete(context.TODO(), namespace, deleteOptions); err != nil { if errors.IsNotFound(err) { // namespace does not exist, we return right away return nil } return fmt.Errorf("error when deleting '%s' Namespace: %v", namespace, err) } err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if ns, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { // Success return true, nil } return false, fmt.Errorf("error when getting Namespace '%s' after delete: %v", namespace, err) } else if ns.Status.Phase != corev1.NamespaceTerminating { return false, fmt.Errorf("deleted Namespace '%s' should be in 'Terminating' phase", namespace) } // Keep trying return false, nil }) return err } // deleteTestNamespace deletes test namespace and waits for deletion to actually complete. func (data *TestData) deleteTestNamespace(timeout time.Duration) error { return data.deleteNamespace(testNamespace, timeout) } // deployAntreaCommon deploys Antrea using kubectl on the control-plane Node. func (data *TestData) deployAntreaCommon(yamlFile string, extraOptions string, waitForAgentRollout bool) error { // TODO: use the K8s apiserver when server side apply is available? // See https://kubernetes.io/docs/reference/using-api/api-concepts/#server-side-apply rc, _, _, err := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply %s -f %s", extraOptions, yamlFile)) if err != nil || rc != 0 { return fmt.Errorf("error when deploying Antrea; is %s available on the control-plane Node?", yamlFile) } rc, _, _, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deploy/%s --timeout=%v", antreaNamespace, antreaDeployment, defaultTimeout)) if err != nil || rc != 0 { return fmt.Errorf("error when waiting for antrea-controller rollout to complete") } if waitForAgentRollout { rc, _, _, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status ds/%s --timeout=%v", antreaNamespace, antreaDaemonSet, defaultTimeout)) if err != nil || rc != 0 { return fmt.Errorf("error when waiting for antrea-agent rollout to complete") } } return nil } // deployAntrea deploys Antrea with the standard manifest. func (data *TestData) deployAntrea() error { if testOptions.enableCoverage { return data.deployAntreaCommon(antreaCovYML, "", true) } return data.deployAntreaCommon(antreaYML, "", true) } // deployAntreaIPSec deploys Antrea with IPSec tunnel enabled. func (data *TestData) deployAntreaIPSec() error { if testOptions.enableCoverage { return data.deployAntreaCommon(antreaIPSecCovYML, "", true) } return data.deployAntreaCommon(antreaIPSecYML, "", true) } // deployAntreaFlowExporter deploys Antrea with flow exporter config params enabled. func (data *TestData) deployAntreaFlowExporter(ipfixCollector string) error { // Enable flow exporter feature and add related config params to antrea agent configmap. 
ac := []configChange{ {"FlowExporter", "true", true}, {"flowPollInterval", "\"1s\"", false}, {"activeFlowExportTimeout", fmt.Sprintf("\"%v\"", exporterActiveFlowExportTimeout), false}, {"idleFlowExportTimeout", fmt.Sprintf("\"%v\"", exporterIdleFlowExportTimeout), false}, } if ipfixCollector != "" { ac = append(ac, configChange{"flowCollectorAddr", fmt.Sprintf("\"%s\"", ipfixCollector), false}) } return data.mutateAntreaConfigMap(nil, ac, false, true) } // deployFlowAggregator deploys flow aggregator with ipfix collector address. func (data *TestData) deployFlowAggregator(ipfixCollector string) (string, error) { flowAggYaml := flowAggregatorYML if testOptions.enableCoverage { flowAggYaml = flowAggregatorCovYML } rc, _, _, err := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", flowAggYaml)) if err != nil || rc != 0 { return "", fmt.Errorf("error when deploying flow aggregator; %s not available on the control-plane Node", flowAggYaml) } svc, err := data.clientset.CoreV1().Services(flowAggregatorNamespace).Get(context.TODO(), flowAggregatorDeployment, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("unable to get service %v: %v", flowAggregatorDeployment, err) } if err = data.mutateFlowAggregatorConfigMap(ipfixCollector, svc.Spec.ClusterIP); err != nil { return "", err } if rc, _, _, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deployment/%s --timeout=%v", flowAggregatorNamespace, flowAggregatorDeployment, 2*defaultTimeout)); err != nil || rc != 0 { _, stdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) _, logStdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s logs -l app=flow-aggregator", flowAggregatorNamespace)) return stdout, fmt.Errorf("error when waiting for flow aggregator rollout to complete. kubectl describe output: %s, logs: %s", stdout, logStdout) } return svc.Spec.ClusterIP, nil } func (data *TestData) mutateFlowAggregatorConfigMap(ipfixCollector string, faClusterIP string) error { configMap, err := data.GetFlowAggregatorConfigMap() if err != nil { return err } flowAggregatorConf, _ := configMap.Data[flowAggregatorConfName] flowAggregatorConf = strings.Replace(flowAggregatorConf, "#externalFlowCollectorAddr: \"\"", fmt.Sprintf("externalFlowCollectorAddr: \"%s\"", ipfixCollector), 1) flowAggregatorConf = strings.Replace(flowAggregatorConf, "#activeFlowRecordTimeout: 60s", fmt.Sprintf("activeFlowRecordTimeout: %v", aggregatorActiveFlowRecordTimeout), 1) flowAggregatorConf = strings.Replace(flowAggregatorConf, "#inactiveFlowRecordTimeout: 90s", fmt.Sprintf("inactiveFlowRecordTimeout: %v", aggregatorInactiveFlowRecordTimeout), 1) if testOptions.providerName == "kind" { // In Kind cluster, there are issues with DNS name resolution on worker nodes. 
// We will use flow aggregator service cluster IP to generate the server certificate for tls communication faAddress := fmt.Sprintf("flowAggregatorAddress: %s", faClusterIP) flowAggregatorConf = strings.Replace(flowAggregatorConf, "#flowAggregatorAddress: \"flow-aggregator.flow-aggregator.svc\"", faAddress, 1) } configMap.Data[flowAggregatorConfName] = flowAggregatorConf if _, err := data.clientset.CoreV1().ConfigMaps(flowAggregatorNamespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed to update ConfigMap %s: %v", configMap.Name, err) } return nil } func (data *TestData) GetFlowAggregatorConfigMap() (*corev1.ConfigMap, error) { deployment, err := data.clientset.AppsV1().Deployments(flowAggregatorNamespace).Get(context.TODO(), flowAggregatorDeployment, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to retrieve Flow aggregator deployment: %v", err) } var configMapName string for _, volume := range deployment.Spec.Template.Spec.Volumes { if volume.ConfigMap != nil && volume.Name == flowAggregatorConfigVolume { configMapName = volume.ConfigMap.Name break } } if len(configMapName) == 0 { return nil, fmt.Errorf("failed to locate %s ConfigMap volume", flowAggregatorConfigVolume) } configMap, err := data.clientset.CoreV1().ConfigMaps(flowAggregatorNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get ConfigMap %s: %v", configMapName, err) } return configMap, nil } // getAgentContainersRestartCount reads the restart count for every container across all Antrea // Agent Pods and returns the sum of all the read values. func (data *TestData) getAgentContainersRestartCount() (int, error) { listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", } pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return 0, fmt.Errorf("failed to list antrea-agent Pods: %v", err) } containerRestarts := 0 for _, pod := range pods.Items { for _, containerStatus := range pod.Status.ContainerStatuses { containerRestarts += int(containerStatus.RestartCount) } } return containerRestarts, nil } // waitForAntreaDaemonSetPods waits for the K8s apiserver to report that all the Antrea Pods are // available, i.e. all the Nodes have one or more of the Antrea daemon Pod running and available. func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error { err := wait.Poll(defaultInterval, timeout, func() (bool, error) { getDS := func(dsName string, os string) (*appsv1.DaemonSet, error) { ds, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("error when getting Antrea %s daemonset: %v", os, err) } return ds, nil } var dsLinux *appsv1.DaemonSet var err error if dsLinux, err = getDS(antreaDaemonSet, "Linux"); err != nil { return false, err } currentNumAvailable := dsLinux.Status.NumberAvailable UpdatedNumberScheduled := dsLinux.Status.UpdatedNumberScheduled if len(clusterInfo.windowsNodes) != 0 { var dsWindows *appsv1.DaemonSet if dsWindows, err = getDS(antreaWindowsDaemonSet, "Windows"); err != nil { return false, err } currentNumAvailable += dsWindows.Status.NumberAvailable UpdatedNumberScheduled += dsWindows.Status.UpdatedNumberScheduled } // Make sure that all Daemon Pods are available. 
// We use clusterInfo.numNodes instead of DesiredNumberScheduled because // DesiredNumberScheduled may not be updated right away. If it is still set to 0 the // first time we get the DaemonSet's Status, we would return immediately instead of // waiting. desiredNumber := int32(clusterInfo.numNodes) if currentNumAvailable != desiredNumber || UpdatedNumberScheduled != desiredNumber { return false, nil } // Make sure that all antrea-agent Pods are not terminating. This is required because NumberAvailable of // DaemonSet counts Pods even if they are terminating. Deleting antrea-agent Pods directly does not cause the // number to decrease if the process doesn't quit immediately, e.g. when the signal is caught by bincover // program and triggers coverage calculation. pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", }) if err != nil { return false, fmt.Errorf("failed to list antrea-agent Pods: %v", err) } if len(pods.Items) != clusterInfo.numNodes { return false, nil } for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { return false, nil } } return true, nil }) if err == wait.ErrWaitTimeout { _, stdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", antreaNamespace)) return fmt.Errorf("antrea-agent DaemonSet not ready within %v; kubectl describe pod output: %v", defaultTimeout, stdout) } else if err != nil { return err } return nil } // waitForCoreDNSPods waits for the K8s apiserver to report that all the CoreDNS Pods are available. func (data *TestData) waitForCoreDNSPods(timeout time.Duration) error { err := wait.PollImmediate(defaultInterval, timeout, func() (bool, error) { deployment, err := data.clientset.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error when retrieving CoreDNS deployment: %v", err) } if deployment.Status.UnavailableReplicas == 0 { return true, nil } // Keep trying return false, nil }) if err == wait.ErrWaitTimeout { return fmt.Errorf("some CoreDNS replicas are still unavailable after %v", defaultTimeout) } else if err != nil { return err } return nil } // restartCoreDNSPods deletes all the CoreDNS Pods to force them to be re-scheduled. It then waits // for all the Pods to become available, by calling waitForCoreDNSPods. func (data *TestData) restartCoreDNSPods(timeout time.Duration) error { var gracePeriodSeconds int64 = 1 deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, } listOptions := metav1.ListOptions{ LabelSelector: "k8s-app=kube-dns", } if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil { return fmt.Errorf("error when deleting all CoreDNS Pods: %v", err) } return data.waitForCoreDNSPods(timeout) } // checkCoreDNSPods checks that all the Pods for the CoreDNS deployment are ready. If not, it // deletes all the Pods to force them to restart and waits up to timeout for the Pods to become // ready. 
func (data *TestData) checkCoreDNSPods(timeout time.Duration) error { if deployment, err := data.clientset.AppsV1().Deployments(antreaNamespace).Get(context.TODO(), "coredns", metav1.GetOptions{}); err != nil { return fmt.Errorf("error when retrieving CoreDNS deployment: %v", err) } else if deployment.Status.UnavailableReplicas == 0 { // deployment ready, nothing to do return nil } return data.restartCoreDNSPods(timeout) } // createClient initializes the K8s clientset in the TestData structure. func (data *TestData) createClient() error { kubeconfigPath, err := provider.GetKubeconfigPath() if err != nil { return fmt.Errorf("error when getting Kubeconfig path: %v", err) } loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() loadingRules.ExplicitPath = kubeconfigPath configOverrides := &clientcmd.ConfigOverrides{} kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides).ClientConfig() if err != nil { return fmt.Errorf("error when building kube config: %v", err) } clientset, err := kubernetes.NewForConfig(kubeConfig) if err != nil { return fmt.Errorf("error when creating kubernetes client: %v", err) } aggregatorClient, err := aggregatorclientset.NewForConfig(kubeConfig) if err != nil { return fmt.Errorf("error when creating kubernetes aggregatorClient: %v", err) } crdClient, err := crdclientset.NewForConfig(kubeConfig) if err != nil { return fmt.Errorf("error when creating CRD client: %v", err) } legacyCrdClient, err := legacycrdclientset.NewForConfig(kubeConfig) if err != nil { return fmt.Errorf("error when creating legacy CRD client: %v", err) } data.kubeConfig = kubeConfig data.clientset = clientset data.aggregatorClient = aggregatorClient data.crdClient = crdClient data.legacyCrdClient = legacyCrdClient return nil } // deleteAntrea deletes the Antrea DaemonSet; we use cascading deletion, which means all the Pods created // by Antrea will be deleted. After issuing the deletion request, we poll the K8s apiserver to ensure // that the DaemonSet does not exist any more. This function is a no-op if the Antrea DaemonSet does // not exist at the time the function is called. func (data *TestData) deleteAntrea(timeout time.Duration) error { if testOptions.enableCoverage { data.gracefulExitAntreaAgent(testOptions.coverageDir, "all") } var gracePeriodSeconds int64 = 5 // Foreground deletion policy ensures that by the time the DaemonSet is deleted, there are // no Antrea Pods left. 
var propagationPolicy = metav1.DeletePropagationForeground deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &propagationPolicy, } deleteDS := func(ds string) error { if err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Delete(context.TODO(), ds, deleteOptions); err != nil { if errors.IsNotFound(err) { // no Antrea DaemonSet running, we return right away return nil } return fmt.Errorf("error when trying to delete Antrea DaemonSet: %v", err) } err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if _, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), ds, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { // Antrea DaemonSet does not exist any more, success return true, nil } return false, fmt.Errorf("error when trying to get Antrea DaemonSet after deletion: %v", err) } // Keep trying return false, nil }) return err } if err := deleteDS(antreaDaemonSet); err != nil { return err } if err := deleteDS(antreaWindowsDaemonSet); err != nil { return err } return nil } // getImageName gets the image name from the fully qualified URI. // For example: "gcr.io/kubernetes-e2e-test-images/agnhost:2.8" gets "agnhost". func getImageName(uri string) string { registryAndImage := strings.Split(uri, ":")[0] paths := strings.Split(registryAndImage, "/") return paths[len(paths)-1] } // createPodOnNode creates a pod in the test namespace with a container whose type is decided by imageName. // Pod will be scheduled on the specified Node (if nodeName is not empty). // mutateFunc can be used to customize the Pod if the other parameters don't meet the requirements. func (data *TestData) createPodOnNode(name string, nodeName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(*corev1.Pod)) error { // image could be a fully qualified URI which can't be used as container name and label value, // extract the image name from it. imageName := getImageName(image) return data.createPodOnNodeInNamespace(name, testNamespace, nodeName, imageName, image, command, args, env, ports, hostNetwork, mutateFunc) } // createPodOnNodeInNamespace creates a pod in the provided namespace with a container whose type is decided by imageName. // Pod will be scheduled on the specified Node (if nodeName is not empty). // mutateFunc can be used to customize the Pod if the other parameters don't meet the requirements. 
func (data *TestData) createPodOnNodeInNamespace(name, ns string, nodeName, ctrName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(*corev1.Pod)) error { podSpec := corev1.PodSpec{ Containers: []corev1.Container{ { Name: ctrName, Image: image, ImagePullPolicy: corev1.PullIfNotPresent, Command: command, Args: args, Env: env, Ports: ports, }, }, RestartPolicy: corev1.RestartPolicyNever, HostNetwork: hostNetwork, } if nodeName != "" { podSpec.NodeSelector = map[string]string{ "kubernetes.io/hostname": nodeName, } } if nodeName == controlPlaneNodeName() { // tolerate NoSchedule taint if we want Pod to run on control-plane Node noScheduleToleration := controlPlaneNoScheduleToleration() podSpec.Tolerations = []corev1.Toleration{noScheduleToleration} } pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ "antrea-e2e": name, "app": ctrName, }, }, Spec: podSpec, } if mutateFunc != nil { mutateFunc(pod) } if _, err := data.clientset.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { return err } return nil } // createBusyboxPodOnNode creates a Pod in the test namespace with a single busybox container. The // Pod will be scheduled on the specified Node (if nodeName is not empty). func (data *TestData) createBusyboxPodOnNode(name string, nodeName string) error { sleepDuration := 3600 // seconds return data.createPodOnNode(name, nodeName, busyboxImage, []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, false, nil) } // createHostNetworkBusyboxPodOnNode creates a host network Pod in the test namespace with a single busybox container. // The Pod will be scheduled on the specified Node (if nodeName is not empty). func (data *TestData) createHostNetworkBusyboxPodOnNode(name string, nodeName string) error { sleepDuration := 3600 // seconds return data.createPodOnNode(name, nodeName, busyboxImage, []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, true, nil) } // createNginxPodOnNode creates a Pod in the test namespace with a single nginx container. The // Pod will be scheduled on the specified Node (if nodeName is not empty). func (data *TestData) createNginxPodOnNode(name string, nodeName string) error { return data.createPodOnNode(name, nodeName, nginxImage, []string{}, nil, nil, []corev1.ContainerPort{ { Name: "http", ContainerPort: 80, Protocol: corev1.ProtocolTCP, }, }, false, nil) } // createServerPod creates a Pod that can listen to specified port and have named port set. func (data *TestData) createServerPod(name string, portName string, portNum int32, setHostPort bool, hostNetwork bool) error { // See https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/porter/porter.go#L17 for the image's detail. cmd := "porter" env := corev1.EnvVar{Name: fmt.Sprintf("SERVE_PORT_%d", portNum), Value: "foo"} port := corev1.ContainerPort{Name: portName, ContainerPort: portNum} if setHostPort { // If hostPort is to be set, it must match the container port number. port.HostPort = int32(portNum) } return data.createPodOnNode(name, "", agnhostImage, nil, []string{cmd}, []corev1.EnvVar{env}, []corev1.ContainerPort{port}, hostNetwork, nil) } // createCustomPod creates a Pod in given Namespace with custom labels. 
func (data *TestData) createServerPodWithLabels(name, ns string, portNum int32, labels map[string]string) error { cmd := []string{"/agnhost", "serve-hostname", "--tcp", "--http=false", "--port", fmt.Sprintf("%d", portNum)} image := "k8s.gcr.io/e2e-test-images/agnhost:2.29" env := corev1.EnvVar{Name: fmt.Sprintf("SERVE_PORT_%d", portNum), Value: "foo"} port := corev1.ContainerPort{ContainerPort: portNum} containerName := fmt.Sprintf("c%v", portNum) mutateLabels := func(pod *corev1.Pod) { for k, v := range labels { pod.Labels[k] = v } } return data.createPodOnNodeInNamespace(name, ns, "", containerName, image, cmd, nil, []corev1.EnvVar{env}, []corev1.ContainerPort{port}, false, mutateLabels) } // deletePod deletes a Pod in the test namespace. func (data *TestData) deletePod(namespace, name string) error { var gracePeriodSeconds int64 = 5 deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, } if err := data.clientset.CoreV1().Pods(namespace).Delete(context.TODO(), name, deleteOptions); err != nil { if !errors.IsNotFound(err) { return err } } return nil } // Deletes a Pod in the test namespace then waits us to timeout for the Pod not to be visible to the // client any more. func (data *TestData) deletePodAndWait(timeout time.Duration, name string) error { if err := data.deletePod(testNamespace, name); err != nil { return err } if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if _, err := data.clientset.CoreV1().Pods(testNamespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { return true, nil } return false, fmt.Errorf("error when getting Pod: %v", err) } // Keep trying return false, nil }); err == wait.ErrWaitTimeout { return fmt.Errorf("Pod '%s' still visible to client after %v", name, timeout) } else { return err } } type PodCondition func(*corev1.Pod) (bool, error) // podWaitFor polls the K8s apiserver until the specified Pod is found (in the test Namespace) and // the condition predicate is met (or until the provided timeout expires). func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) { err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if pod, err := data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { return false, nil } return false, fmt.Errorf("error when getting Pod '%s': %v", name, err) } else { return condition(pod) } }) if err != nil { return nil, err } return data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } // podWaitForRunning polls the k8s apiserver until the specified Pod is in the "running" state (or // until the provided timeout expires). func (data *TestData) podWaitForRunning(timeout time.Duration, name, namespace string) error { _, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == corev1.PodRunning, nil }) return err } // podWaitForIPs polls the K8s apiserver until the specified Pod is in the "running" state (or until // the provided timeout expires). The function then returns the IP addresses assigned to the Pod. If the // Pod is not using "hostNetwork", the function also checks that an IP address exists in each required // Address Family in the cluster. 
func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace string) (*PodIPs, error) { pod, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == corev1.PodRunning, nil }) if err != nil { return nil, err } // According to the K8s API documentation (https://godoc.org/k8s.io/api/core/v1#PodStatus), // the PodIP field should only be empty if the Pod has not yet been scheduled, and "running" // implies scheduled. if pod.Status.PodIP == "" { return nil, fmt.Errorf("Pod is running but has no assigned IP, which should never happen") } podIPStrings := sets.NewString(pod.Status.PodIP) for _, podIP := range pod.Status.PodIPs { ipStr := strings.TrimSpace(podIP.IP) if ipStr != "" { podIPStrings.Insert(ipStr) } } ips, err := parsePodIPs(podIPStrings) if err != nil { return nil, err } if !pod.Spec.HostNetwork { if clusterInfo.podV4NetworkCIDR != "" && ips.ipv4 == nil { return nil, fmt.Errorf("no IPv4 address is assigned while cluster was configured with IPv4 Pod CIDR %s", clusterInfo.podV4NetworkCIDR) } if clusterInfo.podV6NetworkCIDR != "" && ips.ipv6 == nil { return nil, fmt.Errorf("no IPv6 address is assigned while cluster was configured with IPv6 Pod CIDR %s", clusterInfo.podV6NetworkCIDR) } } return ips, nil } func parsePodIPs(podIPStrings sets.String) (*PodIPs, error) { ips := new(PodIPs) for idx := range podIPStrings.List() { ipStr := podIPStrings.List()[idx] ip := net.ParseIP(ipStr) if ip.To4() != nil { if ips.ipv4 != nil && ipStr != ips.ipv4.String() { return nil, fmt.Errorf("Pod is assigned multiple IPv4 addresses: %s and %s", ips.ipv4.String(), ipStr) } if ips.ipv4 == nil { ips.ipv4 = &ip ips.ipStrings = append(ips.ipStrings, ipStr) } } else { if ips.ipv6 != nil && ipStr != ips.ipv6.String() { return nil, fmt.Errorf("Pod is assigned multiple IPv6 addresses: %s and %s", ips.ipv6.String(), ipStr) } if ips.ipv6 == nil { ips.ipv6 = &ip ips.ipStrings = append(ips.ipStrings, ipStr) } } } if len(ips.ipStrings) == 0 { return nil, fmt.Errorf("pod is running but has no assigned IP, which should never happen") } return ips, nil } // deleteAntreaAgentOnNode deletes the antrea-agent Pod on a specific Node and measure how long it // takes for the Pod not to be visible to the client any more. It also waits for a new antrea-agent // Pod to be running on the Node. 
func (data *TestData) deleteAntreaAgentOnNode(nodeName string, gracePeriodSeconds int64, timeout time.Duration) (time.Duration, error) { if testOptions.enableCoverage { data.gracefulExitAntreaAgent(testOptions.coverageDir, nodeName) } listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName), } // we do not use DeleteCollection directly because we want to ensure the resources no longer // exist by the time we return pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return 0, fmt.Errorf("failed to list antrea-agent Pods on Node '%s': %v", nodeName, err) } // in the normal case, there should be a single Pod in the list if len(pods.Items) == 0 { return 0, fmt.Errorf("no available antrea-agent Pods on Node '%s'", nodeName) } deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, } start := time.Now() if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil { return 0, fmt.Errorf("error when deleting antrea-agent Pods on Node '%s': %v", nodeName, err) } if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { for _, pod := range pods.Items { if _, err := data.clientset.CoreV1().Pods(antreaNamespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { continue } return false, fmt.Errorf("error when getting Pod: %v", err) } // Keep trying, at least one Pod left return false, nil } return true, nil }); err != nil { return 0, err } delay := time.Since(start) // wait for new antrea-agent Pod if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return false, fmt.Errorf("failed to list antrea-agent Pods on Node '%s': %v", nodeName, err) } if len(pods.Items) == 0 { // keep trying return false, nil } for _, pod := range pods.Items { if pod.Status.Phase != corev1.PodRunning { return false, nil } } return true, nil }); err != nil { return 0, err } return delay, nil } // getAntreaPodOnNode retrieves the name of the Antrea Pod (antrea-agent-*) running on a specific Node. func (data *TestData) getAntreaPodOnNode(nodeName string) (podName string, err error) { listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName), } pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return "", fmt.Errorf("failed to list Antrea Pods: %v", err) } if len(pods.Items) != 1 { return "", fmt.Errorf("expected *exactly* one Pod") } return pods.Items[0].Name, nil } // getAntreaController retrieves the name of the Antrea Controller (antrea-controller-*) running in the k8s cluster. func (data *TestData) getAntreaController() (*corev1.Pod, error) { listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-controller", } pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return nil, fmt.Errorf("failed to list Antrea Controller: %v", err) } if len(pods.Items) != 1 { return nil, fmt.Errorf("expected *exactly* one Pod") } return &pods.Items[0], nil } // restartAntreaControllerPod deletes the antrea-controller Pod to force it to be re-scheduled. 
It then waits // for the new Pod to become available, and returns it. func (data *TestData) restartAntreaControllerPod(timeout time.Duration) (*corev1.Pod, error) { if testOptions.enableCoverage { data.gracefulExitAntreaController(testOptions.coverageDir) } var gracePeriodSeconds int64 = 1 deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, } listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-controller", } if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil { return nil, fmt.Errorf("error when deleting antrea-controller Pod: %v", err) } var newPod *corev1.Pod // wait for new antrea-controller Pod if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return false, fmt.Errorf("failed to list antrea-controller Pods: %v", err) } // Even though the strategy is "Recreate", the old Pod might still be in terminating state when the new Pod is // running as this is deleting a Pod manually, not upgrade. // See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#recreate-deployment. // So we should ensure there's only 1 Pod and it's running. if len(pods.Items) != 1 || pods.Items[0].DeletionTimestamp != nil { return false, nil } pod := pods.Items[0] ready := false for _, condition := range pod.Status.Conditions { if condition.Type == corev1.PodReady { ready = condition.Status == corev1.ConditionTrue break } } if !ready { return false, nil } newPod = &pod return true, nil }); err != nil { return nil, err } return newPod, nil } // restartAntreaAgentPods deletes all the antrea-agent Pods to force them to be re-scheduled. It // then waits for the new Pods to become available. func (data *TestData) restartAntreaAgentPods(timeout time.Duration) error { if testOptions.enableCoverage { data.gracefulExitAntreaAgent(testOptions.coverageDir, "all") } var gracePeriodSeconds int64 = 1 deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, } listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", } if err := data.clientset.CoreV1().Pods(antreaNamespace).DeleteCollection(context.TODO(), deleteOptions, listOptions); err != nil { return fmt.Errorf("error when deleting antrea-agent Pods: %v", err) } return data.waitForAntreaDaemonSetPods(timeout) } // validatePodIP checks that the provided IP address is in the Pod Network CIDR for the cluster. func validatePodIP(podNetworkCIDR string, ip net.IP) (bool, error) { _, cidr, err := net.ParseCIDR(podNetworkCIDR) if err != nil { return false, fmt.Errorf("podNetworkCIDR '%s' is not a valid CIDR", podNetworkCIDR) } return cidr.Contains(ip), nil } // createService creates a service with port and targetPort. 
func (data *TestData) createService(serviceName string, port, targetPort int32, selector map[string]string, affinity bool, serviceType corev1.ServiceType, ipFamily *corev1.IPFamily) (*corev1.Service, error) { annotation := make(map[string]string) return data.createServiceWithAnnotations(serviceName, port, targetPort, selector, affinity, serviceType, ipFamily, annotation) } // createService creates a service with Annotation func (data *TestData) createServiceWithAnnotations(serviceName string, port, targetPort int32, selector map[string]string, affinity bool, serviceType corev1.ServiceType, ipFamily *corev1.IPFamily, annotations map[string]string) (*corev1.Service, error) { affinityType := corev1.ServiceAffinityNone var ipFamilies []corev1.IPFamily if ipFamily != nil { ipFamilies = append(ipFamilies, *ipFamily) } if affinity { affinityType = corev1.ServiceAffinityClientIP } service := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: testNamespace, Labels: map[string]string{ "antrea-e2e": serviceName, "app": serviceName, }, Annotations: annotations, }, Spec: corev1.ServiceSpec{ SessionAffinity: affinityType, Ports: []corev1.ServicePort{{ Port: port, TargetPort: intstr.FromInt(int(targetPort)), }}, Type: serviceType, Selector: selector, IPFamilies: ipFamilies, }, } return data.clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &service, metav1.CreateOptions{}) } // createNginxClusterIPServiceWithAnnotations creates nginx service with Annotation func (data *TestData) createNginxClusterIPServiceWithAnnotations(affinity bool, ipFamily *corev1.IPFamily, annotation map[string]string) (*corev1.Service, error) { return data.createServiceWithAnnotations("nginx", 80, 80, map[string]string{"app": "nginx"}, affinity, corev1.ServiceTypeClusterIP, ipFamily, annotation) } // createNginxClusterIPService create a nginx service with the given name. func (data *TestData) createNginxClusterIPService(name string, affinity bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { if name == "" { name = "nginx" } return data.createService(name, 80, 80, map[string]string{"app": "nginx"}, affinity, corev1.ServiceTypeClusterIP, ipFamily) } func (data *TestData) createNginxLoadBalancerService(affinity bool, ingressIPs []string, ipFamily *corev1.IPFamily) (*corev1.Service, error) { svc, err := data.createService(nginxLBService, 80, 80, map[string]string{"app": "nginx"}, affinity, corev1.ServiceTypeLoadBalancer, ipFamily) if err != nil { return svc, err } ingress := make([]corev1.LoadBalancerIngress, len(ingressIPs)) for idx, ingressIP := range ingressIPs { ingress[idx].IP = ingressIP } updatedSvc := svc.DeepCopy() updatedSvc.Status.LoadBalancer.Ingress = ingress patchData, err := json.Marshal(updatedSvc) if err != nil { return svc, err } return data.clientset.CoreV1().Services(svc.Namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "status") } // deleteService deletes the service. func (data *TestData) deleteService(name string) error { if err := data.clientset.CoreV1().Services(testNamespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { return fmt.Errorf("unable to cleanup service %v: %v", name, err) } return nil } // Deletes a Service in the test namespace then waits us to timeout for the Service not to be visible to the // client any more. 
func (data *TestData) deleteServiceAndWait(timeout time.Duration, name string) error { if err := data.deleteService(name); err != nil { return err } if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if _, err := data.clientset.CoreV1().Services(testNamespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { return true, nil } return false, fmt.Errorf("error when getting Service: %v", err) } // Keep trying return false, nil }); err == wait.ErrWaitTimeout { return fmt.Errorf("Service '%s' still visible to client after %v", name, timeout) } else { return err } } // createNetworkPolicy creates a network policy with spec. func (data *TestData) createNetworkPolicy(name string, spec *networkingv1.NetworkPolicySpec) (*networkingv1.NetworkPolicy, error) { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ "antrea-e2e": name, }, }, Spec: *spec, } return data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), policy, metav1.CreateOptions{}) } // deleteNetworkpolicy deletes the network policy. func (data *TestData) deleteNetworkpolicy(policy *networkingv1.NetworkPolicy) error { if err := data.clientset.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil { return fmt.Errorf("unable to cleanup policy %v: %v", policy.Name, err) } return nil } // A DNS-1123 subdomain must consist of lower case alphanumeric characters var lettersAndDigits = []rune("abcdefghijklmnopqrstuvwxyz0123456789") func randSeq(n int) string { b := make([]rune, n) for i := range b { // #nosec G404: random number generator not used for security purposes randIdx := rand.Intn(len(lettersAndDigits)) b[i] = lettersAndDigits[randIdx] } return string(b) } func randName(prefix string) string { return prefix + randSeq(nameSuffixLength) } // Run the provided command in the specified Container for the give Pod and returns the contents of // stdout and stderr as strings. An error either indicates that the command couldn't be run or that // the command returned a non-zero error code. func (data *TestData) runCommandFromPod(podNamespace string, podName string, containerName string, cmd []string) (stdout string, stderr string, err error) { request := data.clientset.CoreV1().RESTClient().Post(). Namespace(podNamespace). Resource("pods"). Name(podName). SubResource("exec"). Param("container", containerName). VersionedParams(&corev1.PodExecOptions{ Command: cmd, Stdin: false, Stdout: true, Stderr: true, TTY: false, }, scheme.ParameterCodec) exec, err := remotecommand.NewSPDYExecutor(data.kubeConfig, "POST", request.URL()) if err != nil { return "", "", err } var stdoutB, stderrB bytes.Buffer if err := exec.Stream(remotecommand.StreamOptions{ Stdout: &stdoutB, Stderr: &stderrB, }); err != nil { return stdoutB.String(), stderrB.String(), err } return stdoutB.String(), stderrB.String(), nil } func forAllNodes(fn func(nodeName string) error) error { for idx := 0; idx < clusterInfo.numNodes; idx++ { name := nodeName(idx) if name == "" { return fmt.Errorf("unexpected empty name for Node %d", idx) } if err := fn(name); err != nil { return err } } return nil } // forAllMatchingPodsInNamespace invokes the provided function for every Pod currently running on every Node in a given // namespace and which matches labelSelector criteria. 
func (data *TestData) forAllMatchingPodsInNamespace( labelSelector, nsName string, fn func(nodeName string, podName string, nsName string) error) error { for _, node := range clusterInfo.nodes { listOptions := metav1.ListOptions{ LabelSelector: labelSelector, FieldSelector: fmt.Sprintf("spec.nodeName=%s", node.name), } pods, err := data.clientset.CoreV1().Pods(nsName).List(context.TODO(), listOptions) if err != nil { return fmt.Errorf("failed to list Antrea Pods on Node '%s': %v", node.name, err) } for _, pod := range pods.Items { if err := fn(node.name, pod.Name, nsName); err != nil { return err } } } return nil } func parseArpingStdout(out string) (sent uint32, received uint32, loss float32, err error) { re := regexp.MustCompile(`Sent\s+(\d+)\s+probe.*\nReceived\s+(\d+)\s+response`) matches := re.FindStringSubmatch(out) if len(matches) == 0 { return 0, 0, 0.0, fmt.Errorf("Unexpected arping output") } if v, err := strconv.ParseUint(matches[1], 10, 32); err != nil { return 0, 0, 0.0, fmt.Errorf("Error when retrieving 'sent probes' from arpping output: %v", err) } else { sent = uint32(v) } if v, err := strconv.ParseUint(matches[2], 10, 32); err != nil { return 0, 0, 0.0, fmt.Errorf("Error when retrieving 'received responses' from arpping output: %v", err) } else { received = uint32(v) } loss = 100. * float32(sent-received) / float32(sent) return sent, received, loss, nil } func (data *TestData) runPingCommandFromTestPod(podInfo podInfo, targetPodIPs *PodIPs, ctrName string, count int, size int) error { countOption, sizeOption := "-c", "-s" if podInfo.os == "windows" { countOption = "-n" sizeOption = "-l" } else if podInfo.os != "linux" { return fmt.Errorf("OS of Pod '%s' is not clear", podInfo.name) } cmd := []string{"ping", countOption, strconv.Itoa(count)} if size != 0 { cmd = append(cmd, sizeOption, strconv.Itoa(size)) } if targetPodIPs.ipv4 != nil { cmdV4 := append(cmd, "-4", targetPodIPs.ipv4.String()) if stdout, stderr, err := data.runCommandFromPod(testNamespace, podInfo.name, ctrName, cmdV4); err != nil { return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV4, " "), err, stdout, stderr) } } if targetPodIPs.ipv6 != nil { cmdV6 := append(cmd, "-6", targetPodIPs.ipv6.String()) if stdout, stderr, err := data.runCommandFromPod(testNamespace, podInfo.name, ctrName, cmdV6); err != nil { return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV6, " "), err, stdout, stderr) } } return nil } func (data *TestData) runNetcatCommandFromTestPod(podName string, server string, port int32) error { // Retrying several times to avoid flakes as the test may involve DNS (coredns) and Service/Endpoints (kube-proxy). 
cmd := []string{ "/bin/sh", "-c", fmt.Sprintf("for i in $(seq 1 5); do nc -vz -w 4 %s %d && exit 0 || sleep 1; done; exit 1", server, port), } stdout, stderr, err := data.runCommandFromPod(testNamespace, podName, busyboxContainerName, cmd) if err == nil { return nil } return fmt.Errorf("nc stdout: <%v>, stderr: <%v>, err: <%v>", stdout, stderr, err) } func (data *TestData) doesOVSPortExist(antreaPodName string, portName string) (bool, error) { cmd := []string{"ovs-vsctl", "port-to-br", portName} _, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err == nil { return true, nil } else if strings.Contains(stderr, "no port named") { return false, nil } return false, fmt.Errorf("error when running ovs-vsctl command on Pod '%s': %v", antreaPodName, err) } func (data *TestData) GetEncapMode() (config.TrafficEncapModeType, error) { configMap, err := data.GetAntreaConfigMap(antreaNamespace) if err != nil { return config.TrafficEncapModeInvalid, fmt.Errorf("failed to get Antrea ConfigMap: %v", err) } for _, antreaConfig := range configMap.Data { for _, mode := range config.GetTrafficEncapModes() { searchStr := fmt.Sprintf("trafficEncapMode: %s", mode) if strings.Index(strings.ToLower(antreaConfig), strings.ToLower(searchStr)) != -1 { return mode, nil } } } return config.TrafficEncapModeEncap, nil } func (data *TestData) getFeatures(confName string, antreaNamespace string) (featuregate.FeatureGate, error) { featureGate := features.DefaultMutableFeatureGate.DeepCopy() cfgMap, err := data.GetAntreaConfigMap(antreaNamespace) if err != nil { return nil, err } var cfg interface{} if err := yaml.Unmarshal([]byte(cfgMap.Data[confName]), &cfg); err != nil { return nil, err } rawFeatureGateMap, ok := cfg.(map[interface{}]interface{})["featureGates"] if !ok || rawFeatureGateMap == nil { return featureGate, nil } featureGateMap := make(map[string]bool) for k, v := range rawFeatureGateMap.(map[interface{}]interface{}) { featureGateMap[k.(string)] = v.(bool) } if err := featureGate.SetFromMap(featureGateMap); err != nil { return nil, err } return featureGate, nil } func (data *TestData) GetAgentFeatures(antreaNamespace string) (featuregate.FeatureGate, error) { return data.getFeatures(antreaAgentConfName, antreaNamespace) } func (data *TestData) GetControllerFeatures(antreaNamespace string) (featuregate.FeatureGate, error) { return data.getFeatures(antreaControllerConfName, antreaNamespace) } func (data *TestData) GetAntreaConfigMap(antreaNamespace string) (*corev1.ConfigMap, error) { deployment, err := data.clientset.AppsV1().Deployments(antreaNamespace).Get(context.TODO(), antreaDeployment, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to retrieve Antrea Controller deployment: %v", err) } var configMapName string for _, volume := range deployment.Spec.Template.Spec.Volumes { if volume.ConfigMap != nil && volume.Name == antreaConfigVolume { configMapName = volume.ConfigMap.Name break } } if len(configMapName) == 0 { return nil, fmt.Errorf("failed to locate %s ConfigMap volume", antreaConfigVolume) } configMap, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get ConfigMap %s: %v", configMapName, err) } return configMap, nil } func (data *TestData) GetGatewayInterfaceName(antreaNamespace string) (string, error) { configMap, err := data.GetAntreaConfigMap(antreaNamespace) if err != nil { return "", err } agentConfData := 
configMap.Data["antrea-agent.conf"] for _, line := range strings.Split(agentConfData, "\n") { if strings.HasPrefix(line, "hostGateway") { return strings.Fields(line)[1], nil } } return antreaDefaultGW, nil } func (data *TestData) mutateAntreaConfigMap(controllerChanges []configChange, agentChanges []configChange, restartController, restartAgent bool) error { configMap, err := data.GetAntreaConfigMap(antreaNamespace) if err != nil { return err } controllerConf, _ := configMap.Data["antrea-controller.conf"] for _, c := range controllerChanges { controllerConf = replaceFieldValue(controllerConf, c) } configMap.Data["antrea-controller.conf"] = controllerConf agentConf, _ := configMap.Data["antrea-agent.conf"] for _, c := range agentChanges { agentConf = replaceFieldValue(agentConf, c) } configMap.Data["antrea-agent.conf"] = agentConf if _, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed to update ConfigMap %s: %v", configMap.Name, err) } if restartAgent { err = data.restartAntreaAgentPods(defaultTimeout) if err != nil { return fmt.Errorf("error when restarting antrea-agent Pod: %v", err) } } // controller should be restarted after agents in case of dataplane disruption caused by agent restart on Kind cluster. if restartController { _, err = data.restartAntreaControllerPod(defaultTimeout) if err != nil { return fmt.Errorf("error when restarting antrea-controller Pod: %v", err) } } return nil } func replaceFieldValue(content string, c configChange) string { var res string if c.isFeatureGate { r := regexp.MustCompile(fmt.Sprintf(`(?m)#? %s:.*$`, c.field)) res = r.ReplaceAllString(content, fmt.Sprintf(" %s: %s", c.field, c.value)) } else { r := regexp.MustCompile(fmt.Sprintf(`(?m)#?.*%s:.*$`, c.field)) res = r.ReplaceAllString(content, fmt.Sprintf("%s: %s", c.field, c.value)) } return res } // gracefulExitAntreaController copies the Antrea controller binary coverage data file out before terminating the Pod func (data *TestData) gracefulExitAntreaController(covDir string) error { antreaController, err := data.getAntreaController() if err != nil { return fmt.Errorf("error when getting antrea-controller Pod: %v", err) } podName := antreaController.Name err = data.collectAntctlCovFiles(podName, "antrea-controller", antreaNamespace, covDir) if err != nil { return fmt.Errorf("error when graceful exit Antrea controller - copy antctl coverage files out: %v", err) } cmds := []string{"pgrep", "-f", antreaControllerCovBinary, "-P", "1"} stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) if err != nil { return fmt.Errorf("error when getting pid of '%s', stderr: <%v>, err: <%v>", antreaControllerCovBinary, stderr, err) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) if err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", antreaControllerCovBinary, stderr, err) } err = data.copyPodFiles(podName, "antrea-controller", antreaNamespace, antreaControllerCovFile, covDir) if err != nil { return fmt.Errorf("error when graceful exit Antrea controller - copy antrea-controller coverage files out: %v", err) } return nil } // gracefulExitAntreaAgent copies the Antrea agent binary coverage data file out before terminating the Pod func (data *TestData) gracefulExitAntreaAgent(covDir string, nodeName string) error { 
listOptions := metav1.ListOptions{ LabelSelector: "app=antrea,component=antrea-agent", } if nodeName != "all" { listOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", nodeName) } pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return fmt.Errorf("failed to list antrea-agent pods: %v", err) } for _, pod := range pods.Items { podName := pod.Name err := data.collectAntctlCovFiles(podName, "antrea-agent", antreaNamespace, covDir) if err != nil { return fmt.Errorf("error when graceful exit Antrea agent - copy antctl coverage files out: %v", err) } cmds := []string{"pgrep", "-f", antreaAgentCovBinary, "-P", "1"} stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) if err != nil { return fmt.Errorf("error when getting pid of '%s', stderr: <%v>, err: <%v>", antreaAgentCovBinary, stderr, err) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) if err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", antreaAgentCovBinary, stderr, err) } err = data.copyPodFiles(podName, "antrea-agent", antreaNamespace, antreaAgentCovFile, covDir) if err != nil { return fmt.Errorf("error when graceful exit Antrea agent - copy antrea-agent coverage files out: %v", err) } } return nil } // gracefulExitFlowAggregator copies the Flow Aggregator binary coverage data file out before terminating the Pod. func (data *TestData) gracefulExitFlowAggregator(covDir string) error { listOptions := metav1.ListOptions{ LabelSelector: "app=flow-aggregator", } pods, err := data.clientset.CoreV1().Pods(flowAggregatorNamespace).List(context.TODO(), listOptions) if err != nil { return fmt.Errorf("failed to list Flow Aggregator Pod: %v", err) } if len(pods.Items) != 1 { return fmt.Errorf("expected *exactly* one Pod") } flowAggPod := &pods.Items[0] podName := flowAggPod.Name cmds := []string{"pgrep", "-f", flowAggregatorCovBinary, "-P", "1"} stdout, stderr, err := data.runCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds) if err != nil { _, describeStdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) return fmt.Errorf("error when getting pid of '%s', stdout: <%v>, stderr: <%v>, err: <%v>, describe stdout: <%v>", flowAggregatorCovBinary, stdout, stderr, err, describeStdout) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} if _, stderr, err = data.runCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds); err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", flowAggregatorCovBinary, stderr, err) } if err = data.copyPodFiles(podName, "flow-aggregator", flowAggregatorNamespace, flowAggregatorCovFile, covDir); err != nil { return fmt.Errorf("error when gracefully exiting Flow Aggregator - copy flow-aggregator coverage files out: %v", err) } return nil } // collectAntctlCovFiles collects coverage files for the antctl binary from the Pod and saves them to the coverage directory func (data *TestData) collectAntctlCovFiles(podName string, containerName string, nsName string, covDir string) error { // copy antctl coverage files from Pod to the coverage directory cmds := []string{"bash", "-c", "find . 
-maxdepth 1 -name 'antctl*.out' -exec basename {} ';'"} stdout, stderr, err := data.runCommandFromPod(nsName, podName, containerName, cmds) if err != nil { return fmt.Errorf("error when running this find command '%s' on Pod '%s', stderr: <%v>, err: <%v>", cmds, podName, stderr, err) } stdout = strings.TrimSpace(stdout) files := strings.Split(stdout, "\n") for _, file := range files { if len(file) == 0 { continue } err := data.copyPodFiles(podName, containerName, nsName, file, covDir) if err != nil { return fmt.Errorf("error when copying coverage files for antctl from Pod '%s' to coverage directory '%s': %v", podName, covDir, err) } } return nil } // collectAntctlCovFilesFromControlPlaneNode collects coverage files for the antctl binary from the control-plane Node and saves them to the coverage directory func (data *TestData) collectAntctlCovFilesFromControlPlaneNode(covDir string) error { // copy antctl coverage files from node to the coverage directory var cmd string if testOptions.providerName == "kind" { cmd = "/bin/sh -c find . -maxdepth 1 -name 'antctl*.out' -exec basename {} ';'" } else { cmd = "find . -maxdepth 1 -name 'antctl*.out' -exec basename {} ';'" } rc, stdout, stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return fmt.Errorf("error when running this find command '%s' on control-plane Node '%s', stderr: <%v>, err: <%v>", cmd, controlPlaneNodeName(), stderr, err) } stdout = strings.TrimSpace(stdout) files := strings.Split(stdout, "\n") for _, file := range files { if len(file) == 0 { continue } err := data.copyNodeFiles(controlPlaneNodeName(), file, covDir) if err != nil { return fmt.Errorf("error when copying coverage files for antctl from Node '%s' to coverage directory '%s': %v", controlPlaneNodeName(), covDir, err) } } return nil } // copyPodFiles copies file from a Pod and save it to specified directory func (data *TestData) copyPodFiles(podName string, containerName string, nsName string, fileName string, covDir string) error { // getPodWriter creates the file with name podName-fileName-suffix. It returns nil if the // file cannot be created. File must be closed by the caller. getPodWriter := func(podName, fileName, suffix string) *os.File { covFile := filepath.Join(covDir, fmt.Sprintf("%s-%s-%s", podName, fileName, suffix)) f, err := os.Create(covFile) if err != nil { _ = fmt.Errorf("error when creating coverage file '%s': %v", covFile, err) return nil } return f } // dump the file from Antrea Pods to disk. // a filepath-friendly timestamp format. const timeFormat = "Jan02-15-04-05" timeStamp := time.Now().Format(timeFormat) w := getPodWriter(podName, fileName, timeStamp) if w == nil { return nil } defer w.Close() cmd := []string{"cat", fileName} stdout, stderr, err := data.runCommandFromPod(nsName, podName, containerName, cmd) if err != nil { return fmt.Errorf("cannot retrieve content of file '%s' from Pod '%s', stderr: <%v>, err: <%v>", fileName, podName, stderr, err) } if stdout == "" { return nil } w.WriteString(stdout) return nil } // copyNodeFiles copies a file from a Node and save it to specified directory func (data *TestData) copyNodeFiles(nodeName string, fileName string, covDir string) error { // getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file // cannot be created. File must be closed by the caller. 
getNodeWriter := func(nodeName, fileName, suffix string) *os.File { covFile := filepath.Join(covDir, fmt.Sprintf("%s-%s-%s", nodeName, fileName, suffix)) f, err := os.Create(covFile) if err != nil { _ = fmt.Errorf("error when creating coverage file '%s': %v", covFile, err) return nil } return f } // dump the file from Antrea Pods to disk. // a filepath-friendly timestamp format. const timeFormat = "Jan02-15-04-05" timeStamp := time.Now().Format(timeFormat) w := getNodeWriter(nodeName, fileName, timeStamp) if w == nil { return nil } defer w.Close() cmd := fmt.Sprintf("cat %s", fileName) rc, stdout, stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return fmt.Errorf("cannot retrieve content of file '%s' from Node '%s', stderr: <%v>, err: <%v>", fileName, controlPlaneNodeName(), stderr, err) } if stdout == "" { return nil } w.WriteString(stdout) return nil } // createAgnhostPodOnNode creates a Pod in the test namespace with a single agnhost container. The // Pod will be scheduled on the specified Node (if nodeName is not empty). func (data *TestData) createAgnhostPodOnNode(name string, nodeName string) error { sleepDuration := 3600 // seconds return data.createPodOnNode(name, nodeName, agnhostImage, []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, false, nil) } func (data *TestData) createDaemonSet(name string, ns string, ctrName string, image string, cmd []string, args []string) (*appsv1.DaemonSet, func() error, error) { podSpec := corev1.PodSpec{ Tolerations: []corev1.Toleration{ controlPlaneNoScheduleToleration(), }, Containers: []corev1.Container{ { Name: ctrName, Image: image, ImagePullPolicy: corev1.PullIfNotPresent, Command: cmd, Args: args, }, }, } dsSpec := appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "antrea-e2e": name, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ "antrea-e2e": name, }, }, Spec: podSpec, }, UpdateStrategy: appsv1.DaemonSetUpdateStrategy{}, MinReadySeconds: 0, RevisionHistoryLimit: nil, } ds := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ "antrea-e2e": name, }, }, Spec: dsSpec, } resDS, err := data.clientset.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { return nil, nil, err } cleanup := func() error { if err := data.clientset.AppsV1().DaemonSets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { return err } return nil } return resDS, cleanup, nil } func (data *TestData) waitForDaemonSetPods(timeout time.Duration, dsName string, namespace string) error { err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if ds, err := data.clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{}); err != nil { return false, err } else { if ds.Status.NumberReady != int32(clusterInfo.numNodes) { return false, nil } return true, nil } }) if err != nil { return err } return nil }
1
40,055
we should also skip the e2e flow aggregator tests if the Flow Exporter feature is disabled.
antrea-io-antrea
go
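The review message for this row suggests skipping the e2e flow aggregator tests when the Flow Exporter feature is disabled. A minimal sketch of what such a guard could look like, reusing the GetAgentFeatures helper defined in the framework file above; the features.FlowExporter gate name and the *testing.T plumbing are assumptions for illustration, not part of this row's file or patch.
func skipIfFlowExporterDisabled(t *testing.T, data *TestData) {
	// Read the agent feature gates via the GetAgentFeatures helper defined
	// earlier in the framework file above.
	agentFeatures, err := data.GetAgentFeatures(antreaNamespace)
	if err != nil {
		t.Fatalf("Error when getting agent feature gates: %v", err)
	}
	// features.FlowExporter is assumed to be the agent's FlowExporter gate.
	if !agentFeatures.Enabled(features.FlowExporter) {
		t.Skipf("Skipping test because the FlowExporter feature gate is disabled")
	}
}
A test that depends on flow export would then call this helper first, so the suite degrades to a skip instead of a failure on clusters where the gate is off.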
@@ -225,6 +225,7 @@ class ConsoleMaster(flow.FlowMaster): if err: print >> sys.stderr, "Script load error:", err sys.exit(1) + script.ObserveScripts(self, i) if options.outfile: err = self.start_stream_to_path(
1
from __future__ import absolute_import import mailcap import mimetypes import tempfile import os import os.path import shlex import signal import stat import subprocess import sys import traceback import urwid import weakref from .. import controller, flow, script, contentviews from . import flowlist, flowview, help, window, signals, options from . import grideditor, palettes, statusbar, palettepicker EVENTLOG_SIZE = 500 class ConsoleState(flow.State): def __init__(self): flow.State.__init__(self) self.focus = None self.follow_focus = None self.default_body_view = contentviews.get("Auto") self.flowsettings = weakref.WeakKeyDictionary() self.last_search = None def __setattr__(self, name, value): self.__dict__[name] = value signals.update_settings.send(self) def add_flow_setting(self, flow, key, value): d = self.flowsettings.setdefault(flow, {}) d[key] = value def get_flow_setting(self, flow, key, default=None): d = self.flowsettings.get(flow, {}) return d.get(key, default) def add_flow(self, f): super(ConsoleState, self).add_flow(f) if self.focus is None: self.set_focus(0) elif self.follow_focus: self.set_focus(len(self.view) - 1) self.set_flow_marked(f, False) return f def update_flow(self, f): super(ConsoleState, self).update_flow(f) if self.focus is None: self.set_focus(0) return f def set_limit(self, limit): ret = flow.State.set_limit(self, limit) self.set_focus(self.focus) return ret def get_focus(self): if not self.view or self.focus is None: return None, None return self.view[self.focus], self.focus def set_focus(self, idx): if self.view: if idx >= len(self.view): idx = len(self.view) - 1 elif idx < 0: idx = 0 self.focus = idx else: self.focus = None def set_focus_flow(self, f): self.set_focus(self.view.index(f)) def get_from_pos(self, pos): if len(self.view) <= pos or pos < 0: return None, None return self.view[pos], pos def get_next(self, pos): return self.get_from_pos(pos + 1) def get_prev(self, pos): return self.get_from_pos(pos - 1) def delete_flow(self, f): if f in self.view and self.view.index(f) <= self.focus: self.focus -= 1 if self.focus < 0: self.focus = None ret = flow.State.delete_flow(self, f) self.set_focus(self.focus) return ret def clear(self): marked_flows = [] for f in self.flows: if self.flow_marked(f): marked_flows.append(f) super(ConsoleState, self).clear() for f in marked_flows: self.add_flow(f) self.set_flow_marked(f, True) if len(self.flows.views) == 0: self.focus = None else: self.focus = 0 self.set_focus(self.focus) def flow_marked(self, flow): return self.get_flow_setting(flow, "marked", False) def set_flow_marked(self, flow, marked): self.add_flow_setting(flow, "marked", marked) class Options(object): attributes = [ "app", "app_domain", "app_ip", "anticache", "anticomp", "client_replay", "eventlog", "keepserving", "kill", "intercept", "limit", "no_server", "refresh_server_playback", "rfile", "scripts", "showhost", "replacements", "rheaders", "setheaders", "server_replay", "stickycookie", "stickyauth", "stream_large_bodies", "verbosity", "wfile", "nopop", "palette", "palette_transparent", "no_mouse" ] def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) for i in self.attributes: if not hasattr(self, i): setattr(self, i, None) class ConsoleMaster(flow.FlowMaster): palette = [] def __init__(self, server, options): flow.FlowMaster.__init__(self, server, ConsoleState()) self.stream_path = None self.options = options for i in options.replacements: self.replacehooks.add(*i) for i in options.setheaders: self.setheaders.add(*i) r = 
self.set_intercept(options.intercept) if r: print >> sys.stderr, "Intercept error:", r sys.exit(1) if options.limit: self.set_limit(options.limit) r = self.set_stickycookie(options.stickycookie) if r: print >> sys.stderr, "Sticky cookies error:", r sys.exit(1) r = self.set_stickyauth(options.stickyauth) if r: print >> sys.stderr, "Sticky auth error:", r sys.exit(1) self.set_stream_large_bodies(options.stream_large_bodies) self.refresh_server_playback = options.refresh_server_playback self.anticache = options.anticache self.anticomp = options.anticomp self.killextra = options.kill self.rheaders = options.rheaders self.nopop = options.nopop self.showhost = options.showhost self.palette = options.palette self.palette_transparent = options.palette_transparent self.eventlog = options.eventlog self.eventlist = urwid.SimpleListWalker([]) if options.client_replay: self.client_playback_path(options.client_replay) if options.server_replay: self.server_playback_path(options.server_replay) if options.scripts: for i in options.scripts: err = self.load_script(i) if err: print >> sys.stderr, "Script load error:", err sys.exit(1) if options.outfile: err = self.start_stream_to_path( options.outfile[0], options.outfile[1] ) if err: print >> sys.stderr, "Stream file error:", err sys.exit(1) self.view_stack = [] if options.app: self.start_app(self.options.app_host, self.options.app_port) signals.call_in.connect(self.sig_call_in) signals.pop_view_state.connect(self.sig_pop_view_state) signals.push_view_state.connect(self.sig_push_view_state) signals.sig_add_event.connect(self.sig_add_event) def __setattr__(self, name, value): self.__dict__[name] = value signals.update_settings.send(self) def sig_add_event(self, sender, e, level): needed = dict(error=0, info=1, debug=2).get(level, 1) if self.options.verbosity < needed: return if level == "error": e = urwid.Text(("error", str(e))) else: e = urwid.Text(str(e)) self.eventlist.append(e) if len(self.eventlist) > EVENTLOG_SIZE: self.eventlist.pop(0) self.eventlist.set_focus(len(self.eventlist) - 1) def add_event(self, e, level): signals.add_event(e, level) def sig_call_in(self, sender, seconds, callback, args=()): def cb(*_): return callback(*args) self.loop.set_alarm_in(seconds, cb) def sig_pop_view_state(self, sender): if len(self.view_stack) > 1: self.view_stack.pop() self.loop.widget = self.view_stack[-1] else: signals.status_prompt_onekey.send( self, prompt = "Quit", keys = ( ("yes", "y"), ("no", "n"), ), callback = self.quit, ) def sig_push_view_state(self, sender, window): self.view_stack.append(window) self.loop.widget = window self.loop.draw_screen() def start_stream_to_path(self, path, mode="wb"): path = os.path.expanduser(path) try: f = file(path, mode) self.start_stream(f, None) except IOError as v: return str(v) self.stream_path = path def _run_script_method(self, method, s, f): status, val = s.run(method, f) if val: if status: signals.add_event("Method %s return: %s" % (method, val), "debug") else: signals.add_event( "Method %s error: %s" % (method, val[1]), "error") def run_script_once(self, command, f): if not command: return signals.add_event("Running script on flow: %s" % command, "debug") try: s = script.Script(command, self) except script.ScriptError as v: signals.status_message.send( message = "Error loading script." 
) signals.add_event("Error loading script:\n%s" % v.args[0], "error") return if f.request: self._run_script_method("request", s, f) if f.response: self._run_script_method("response", s, f) if f.error: self._run_script_method("error", s, f) s.unload() signals.flow_change.send(self, flow = f) def set_script(self, command): if not command: return ret = self.load_script(command) if ret: signals.status_message.send(message=ret) def toggle_eventlog(self): self.eventlog = not self.eventlog signals.pop_view_state.send(self) self.view_flowlist() def _readflows(self, path): """ Utitility function that reads a list of flows or prints an error to the UI if that fails. Returns - None, if there was an error. - a list of flows, otherwise. """ try: return flow.read_flows_from_paths(path) except flow.FlowReadError as e: signals.status_message.send(message=e.strerror) def client_playback_path(self, path): if not isinstance(path, list): path = [path] flows = self._readflows(path) if flows: self.start_client_playback(flows, False) def server_playback_path(self, path): if not isinstance(path, list): path = [path] flows = self._readflows(path) if flows: self.start_server_playback( flows, self.killextra, self.rheaders, False, self.nopop, self.options.replay_ignore_params, self.options.replay_ignore_content, self.options.replay_ignore_payload_params, self.options.replay_ignore_host ) def spawn_editor(self, data): fd, name = tempfile.mkstemp('', "mproxy") os.write(fd, data) os.close(fd) c = os.environ.get("EDITOR") # if no EDITOR is set, assume 'vi' if not c: c = "vi" cmd = shlex.split(c) cmd.append(name) self.ui.stop() try: subprocess.call(cmd) except: signals.status_message.send( message = "Can't start editor: %s" % " ".join(c) ) else: data = open(name, "rb").read() self.ui.start() os.unlink(name) return data def spawn_external_viewer(self, data, contenttype): if contenttype: contenttype = contenttype.split(";")[0] ext = mimetypes.guess_extension(contenttype) or "" else: ext = "" fd, name = tempfile.mkstemp(ext, "mproxy") os.write(fd, data) os.close(fd) # read-only to remind the user that this is a view function os.chmod(name, stat.S_IREAD) cmd = None shell = False if contenttype: c = mailcap.getcaps() cmd, _ = mailcap.findmatch(c, contenttype, filename=name) if cmd: shell = True if not cmd: # hm which one should get priority? c = os.environ.get("PAGER") or os.environ.get("EDITOR") if not c: c = "less" cmd = shlex.split(c) cmd.append(name) self.ui.stop() try: subprocess.call(cmd, shell=shell) except: signals.status_message.send( message="Can't start external viewer: %s" % " ".join(c) ) self.ui.start() os.unlink(name) def set_palette(self, name): self.palette = name self.ui.register_palette( palettes.palettes[name].palette(self.palette_transparent) ) self.ui.clear() def ticker(self, *userdata): changed = self.tick(self.masterq, timeout=0) if changed: self.loop.draw_screen() signals.update_settings.send() self.loop.set_alarm_in(0.01, self.ticker) def run(self): self.ui = urwid.raw_display.Screen() self.ui.set_terminal_properties(256) self.set_palette(self.palette) self.loop = urwid.MainLoop( urwid.SolidFill("x"), screen = self.ui, handle_mouse = not self.options.no_mouse, ) self.server.start_slave( controller.Slave, controller.Channel(self.masterq, self.should_exit) ) if self.options.rfile: ret = self.load_flows_path(self.options.rfile) if ret and self.state.flow_count(): signals.add_event( "File truncated or corrupted. 
" "Loaded as many flows as possible.", "error" ) elif ret and not self.state.flow_count(): self.shutdown() print >> sys.stderr, "Could not load file:", ret sys.exit(1) self.loop.set_alarm_in(0.01, self.ticker) # It's not clear why we need to handle this explicitly - without this, # mitmproxy hangs on keyboard interrupt. Remove if we ever figure it # out. def exit(s, f): raise urwid.ExitMainLoop signal.signal(signal.SIGINT, exit) self.loop.set_alarm_in( 0.0001, lambda *args: self.view_flowlist() ) try: self.loop.run() except Exception: self.loop.stop() sys.stdout.flush() print >> sys.stderr, traceback.format_exc() print >> sys.stderr, "mitmproxy has crashed!" print >> sys.stderr, "Please lodge a bug report at:" print >> sys.stderr, "\thttps://github.com/mitmproxy/mitmproxy" print >> sys.stderr, "Shutting down..." sys.stderr.flush() self.shutdown() def view_help(self, helpctx): signals.push_view_state.send( self, window = window.Window( self, help.HelpView(helpctx), None, statusbar.StatusBar(self, help.footer), None ) ) def view_options(self): for i in self.view_stack: if isinstance(i["body"], options.Options): return signals.push_view_state.send( self, window = window.Window( self, options.Options(self), None, statusbar.StatusBar(self, options.footer), options.help_context, ) ) def view_palette_picker(self): signals.push_view_state.send( self, window = window.Window( self, palettepicker.PalettePicker(self), None, statusbar.StatusBar(self, palettepicker.footer), palettepicker.help_context, ) ) def view_grideditor(self, ge): signals.push_view_state.send( self, window = window.Window( self, ge, None, statusbar.StatusBar(self, grideditor.FOOTER), ge.make_help() ) ) def view_flowlist(self): if self.ui.started: self.ui.clear() if self.state.follow_focus: self.state.set_focus(self.state.flow_count()) if self.eventlog: body = flowlist.BodyPile(self) else: body = flowlist.FlowListBox(self) signals.push_view_state.send( self, window = window.Window( self, body, None, statusbar.StatusBar(self, flowlist.footer), flowlist.help_context ) ) def view_flow(self, flow, tab_offset=0): self.state.set_focus_flow(flow) signals.push_view_state.send( self, window = window.Window( self, flowview.FlowView(self, self.state, flow, tab_offset), flowview.FlowViewHeader(self, flow), statusbar.StatusBar(self, flowview.footer), flowview.help_context ) ) def _write_flows(self, path, flows): if not path: return path = os.path.expanduser(path) try: f = file(path, "wb") fw = flow.FlowWriter(f) for i in flows: fw.add(i) f.close() except IOError as v: signals.status_message.send(message=v.strerror) def save_one_flow(self, path, flow): return self._write_flows(path, [flow]) def save_flows(self, path): return self._write_flows(path, self.state.view) def save_marked_flows(self, path): marked_flows = [] for f in self.state.view: if self.state.flow_marked(f): marked_flows.append(f) return self._write_flows(path, marked_flows) def load_flows_callback(self, path): if not path: return ret = self.load_flows_path(path) return ret or "Flows loaded from %s" % path def load_flows_path(self, path): reterr = None try: flow.FlowMaster.load_flows_file(self, path) except flow.FlowReadError as v: reterr = str(v) signals.flowlist_change.send(self) return reterr def accept_all(self): self.state.accept_all(self) def set_limit(self, txt): v = self.state.set_limit(txt) signals.flowlist_change.send(self) return v def set_intercept(self, txt): return self.state.set_intercept(txt) def change_default_display_mode(self, t): v = 
contentviews.get_by_shortcut(t) self.state.default_body_view = v self.refresh_focus() def edit_scripts(self, scripts): commands = [x[0] for x in scripts] # remove outer array if commands == [s.command for s in self.scripts]: return self.unload_scripts() for command in commands: self.load_script(command) signals.update_settings.send(self) def stop_client_playback_prompt(self, a): if a != "n": self.stop_client_playback() def stop_server_playback_prompt(self, a): if a != "n": self.stop_server_playback() def quit(self, a): if a != "n": raise urwid.ExitMainLoop def shutdown(self): self.state.killall(self) flow.FlowMaster.shutdown(self) def clear_flows(self): self.state.clear() signals.flowlist_change.send(self) def toggle_follow_flows(self): # toggle flow follow self.state.follow_focus = not self.state.follow_focus # jump to most recent flow if follow is now on if self.state.follow_focus: self.state.set_focus(self.state.flow_count()) signals.flowlist_change.send(self) def delete_flow(self, f): self.state.delete_flow(f) signals.flowlist_change.send(self) def refresh_focus(self): if self.state.view: signals.flow_change.send( self, flow = self.state.view[self.state.focus] ) def process_flow(self, f): if self.state.intercept and f.match( self.state.intercept) and not f.request.is_replay: f.intercept(self) else: f.reply() signals.flowlist_change.send(self) signals.flow_change.send(self, flow = f) def clear_events(self): self.eventlist[:] = [] # Handlers def handle_error(self, f): f = flow.FlowMaster.handle_error(self, f) if f: self.process_flow(f) return f def handle_request(self, f): f = flow.FlowMaster.handle_request(self, f) if f: self.process_flow(f) return f def handle_response(self, f): f = flow.FlowMaster.handle_response(self, f) if f: self.process_flow(f) return f
1
10,883
We should initialize the observation in the constructor of the Script class - otherwise, we don't have this feature for mitmdump.
mitmproxy-mitmproxy
py
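The review message for this row proposes initializing the script observation in the constructor of the Script class, so that mitmdump benefits as well, rather than calling script.ObserveScripts(self, i) only from the ConsoleMaster constructor as the patch above does. A rough, self-contained sketch of that idea follows; it is not the actual mitmproxy code, and ObserveScripts below is only a stand-in for the real watcher registration.
def ObserveScripts(master, path):
    # Stand-in for the script.ObserveScripts(master, path) call added by the
    # patch above; assumed to register a file watcher for the script path.
    print("observing %s for changes" % path)


class Script(object):
    def __init__(self, command, master):
        self.command = command
        self.master = master
        # Registering the observation here means any master that constructs a
        # Script (console UI or mitmdump) gets script reloading, not just the
        # ConsoleMaster constructor patched above.
        ObserveScripts(master, command)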
@@ -172,9 +172,11 @@ class Index(IndexOpsMixin): raise ValueError('Names must be a list-like') internal = self._kdf._internal if len(internal.index_map) != len(names): + raise ValueError('Length of new names must be {}, got {}' .format(len(internal.index_map), len(names))) - names = [name if isinstance(name, tuple) else (name,) for name in names] + + names = [name if isinstance(name, (tuple, type(None))) else (name,) for name in names] self._kdf._internal = internal.copy(index_map=list(zip(internal.index_columns, names))) @property
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Wrappers for Indexes to behave similar to pandas Index, MultiIndex. """ from functools import partial from typing import Any, List, Optional, Tuple, Union import pandas as pd from pandas.api.types import is_list_like, is_interval_dtype, is_bool_dtype, \ is_categorical_dtype, is_integer_dtype, is_float_dtype, is_numeric_dtype, is_object_dtype from pyspark import sql as spark from pyspark.sql import functions as F from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.config import get_option from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.base import IndexOpsMixin from databricks.koalas.frame import DataFrame from databricks.koalas.internal import _InternalFrame from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, _MissingPandasLikeMultiIndex from databricks.koalas.series import Series from databricks.koalas.utils import name_like_string class Index(IndexOpsMixin): """ Koalas Index that corresponds to Pandas Index logically. This might hold Spark Column internally. :ivar _kdf: The parent dataframe :type _kdf: DataFrame :ivar _scol: Spark Column instance :type _scol: pyspark.Column See Also -------- MultiIndex : A multi-level, or hierarchical, Index. Examples -------- >>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index Int64Index([1, 2, 3], dtype='int64') >>> ks.DataFrame({'a': [1, 2, 3]}, index=list('abc')).index Index(['a', 'b', 'c'], dtype='object') """ def __init__(self, kdf: DataFrame, scol: Optional[spark.Column] = None) -> None: if scol is None: scol = kdf._internal.index_scols[0] internal = kdf._internal.copy(scol=scol, data_columns=kdf._internal.index_columns, column_index=kdf._internal.index_names, column_index_names=None) IndexOpsMixin.__init__(self, internal, kdf) def _with_new_scol(self, scol: spark.Column) -> 'Index': """ Copy Koalas Index with the new Spark Column. :param scol: the new Spark Column :return: the copied Index """ return Index(self._kdf, scol) @property def size(self) -> int: """ Return an int representing the number of elements in this object. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats'], ... index=list('abcd')) >>> df.index.size 4 >>> df.set_index('dogs', append=True).index.size 4 """ return len(self._kdf) # type: ignore def to_pandas(self) -> pd.Index: """ Return a pandas Index. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats'], ... 
index=list('abcd')) >>> df['dogs'].index.to_pandas() Index(['a', 'b', 'c', 'd'], dtype='object') """ sdf = self._kdf._sdf.select(self._scol) internal = self._kdf._internal.copy( sdf=sdf, index_map=[(sdf.schema[0].name, self._kdf._internal.index_names[0])], data_columns=[], column_index=[], column_index_names=None) return DataFrame(internal)._to_internal_pandas().index toPandas = to_pandas @property def spark_type(self): """ Returns the data type as defined by Spark, as a Spark DataType object.""" return self.to_series().spark_type @property def has_duplicates(self) -> bool: """ If index has duplicates, return True, otherwise False. Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3]}, index=list('aac')) >>> kdf.index.has_duplicates True >>> kdf = ks.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]) >>> kdf.index.has_duplicates False >>> kdf = ks.DataFrame({'a': [1, 2, 3]}, index=[list('aac'), list('eef')]) >>> kdf.index.has_duplicates True """ df = self._kdf._sdf.select(self._scol) col = df.columns[0] return df.select(F.count(col) != F.countDistinct(col)).first()[0] @property def name(self) -> Union[str, Tuple[str, ...]]: """Return name of the Index.""" return self.names[0] @name.setter def name(self, name: Union[str, Tuple[str, ...]]) -> None: self.names = [name] @property def names(self) -> List[Union[str, Tuple[str, ...]]]: """Return names of the Index.""" return [name if name is None or len(name) > 1 else name[0] for name in self._kdf._internal.index_names] @names.setter def names(self, names: List[Union[str, Tuple[str, ...]]]) -> None: if not is_list_like(names): raise ValueError('Names must be a list-like') internal = self._kdf._internal if len(internal.index_map) != len(names): raise ValueError('Length of new names must be {}, got {}' .format(len(internal.index_map), len(names))) names = [name if isinstance(name, tuple) else (name,) for name in names] self._kdf._internal = internal.copy(index_map=list(zip(internal.index_columns, names))) @property def nlevels(self) -> int: """ Number of levels in Index & MultiIndex. Examples -------- >>> kdf = ks.DataFrame({"a": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name="idx")) >>> kdf.index.nlevels 1 >>> kdf = ks.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]) >>> kdf.index.nlevels 2 """ return len(self._kdf._internal.index_columns) def rename(self, name: Union[str, Tuple[str, ...]], inplace: bool = False): """ Alter Index name. Able to set new names without level. Defaults to returning new index. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default False Modifies the object directly, instead of creating a new Index. Returns ------- Index The same type as the caller or None if inplace is True. Examples -------- >>> df = ks.DataFrame({'a': ['A', 'C'], 'b': ['A', 'B']}, columns=['a', 'b']) >>> df.index.rename("c") Int64Index([0, 1], dtype='int64', name='c') >>> df.set_index("a", inplace=True) >>> df.index.rename("d") Index(['A', 'C'], dtype='object', name='d') You can also change the index name in place. 
>>> df.index.rename("e", inplace=True) Index(['A', 'C'], dtype='object', name='e') >>> df # doctest: +NORMALIZE_WHITESPACE b e A A C B """ index_columns = self._kdf._internal.index_columns assert len(index_columns) == 1 if isinstance(name, str): name = (name,) internal = self._kdf._internal.copy(index_map=[(index_columns[0], name)]) if inplace: self._kdf._internal = internal return self else: return Index(DataFrame(internal), self._scol) def to_series(self, name: Union[str, Tuple[str, ...]] = None) -> Series: """ Create a Series with both index and values equal to the index keys useful with map for returning an indexer based on an index. Parameters ---------- name : string, optional name of resulting Series. If None, defaults to name of original index Returns ------- Series : dtype will be based on the type of the Index values. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats'], ... index=list('abcd')) >>> df['dogs'].index.to_series() a a b b c c d d Name: 0, dtype: object """ kdf = self._kdf scol = self._scol if name is not None: scol = scol.alias(name_like_string(name)) column_index = [None] if len(kdf._internal.index_map) > 1 else kdf._internal.index_names return Series(kdf._internal.copy(scol=scol, column_index=column_index, column_index_names=None), anchor=kdf) def is_boolean(self): """ Return if the current index type is a boolean type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[True]).index.is_boolean() True """ return is_bool_dtype(self.dtype) def is_categorical(self): """ Return if the current index type is a categorical type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[1]).index.is_categorical() False """ return is_categorical_dtype(self.dtype) def is_floating(self): """ Return if the current index type is a floating type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[1]).index.is_floating() False """ return is_float_dtype(self.dtype) def is_integer(self): """ Return if the current index type is a integer type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[1]).index.is_integer() True """ return is_integer_dtype(self.dtype) def is_interval(self): """ Return if the current index type is an interval type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[1]).index.is_interval() False """ return is_interval_dtype(self.dtype) def is_numeric(self): """ Return if the current index type is a numeric type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=[1]).index.is_numeric() True """ return is_numeric_dtype(self.dtype) def is_object(self): """ Return if the current index type is a object type. Examples -------- >>> ks.DataFrame({'a': [1]}, index=["a"]).index.is_object() True """ return is_object_dtype(self.dtype) def unique(self, level=None): """ Return unique values in the index. Be aware the order of unique values might be different than pandas.Index.unique :param level: int or str, optional, default is None :return: Index without deuplicates Examples -------- >>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 1, 3]).index.unique() Int64Index([1, 3], dtype='int64') >>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=['d', 'e', 'e']).index.unique() Index(['e', 'd'], dtype='object') """ if level is not None: self._validate_index_level(level) sdf = self._kdf._sdf.select(self._scol.alias(self._internal.index_columns[0])).distinct() return DataFrame(_InternalFrame(sdf=sdf, index_map=self._kdf._internal.index_map)).index def _validate_index_level(self, level): """ Validate index level. 
For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. """ if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level," " %d is not a valid level number" % (level,) ) elif level > 0: raise IndexError( "Too many levels:" " Index has only 1 level, not %d" % (level + 1) ) elif level != self.name: raise KeyError( "Requested level ({}) does not match index name ({})".format( level, self.name ) ) def copy(self, name=None): """ Make a copy of this object. name sets those attributes on the new object. Parameters ---------- name : string, optional to set name of index Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.index Index(['cobra', 'viper', 'sidewinder'], dtype='object') Copy index >>> df.index.copy() Index(['cobra', 'viper', 'sidewinder'], dtype='object') Copy index with name >>> df.index.copy(name='snake') Index(['cobra', 'viper', 'sidewinder'], dtype='object', name='snake') """ internal = self._kdf._internal.copy() result = Index(ks.DataFrame(internal), self._scol) if name: result.name = name return result def __getattr__(self, item: str) -> Any: if hasattr(_MissingPandasLikeIndex, item): property_or_func = getattr(_MissingPandasLikeIndex, item) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) raise AttributeError("'Index' object has no attribute '{}'".format(item)) def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return repr(self.to_pandas()) pindex = self._kdf.head(max_display_count + 1).index._with_new_scol(self._scol).to_pandas() pindex_length = len(pindex) repr_string = repr(pindex[:max_display_count]) if pindex_length > max_display_count: footer = '\nShowing only the first {}'.format(max_display_count) return repr_string + footer return repr_string def __iter__(self): return _MissingPandasLikeIndex.__iter__(self) class MultiIndex(Index): """ Koalas MultiIndex that corresponds to Pandas MultiIndex logically. This might hold Spark Column internally. :ivar _kdf: The parent dataframe :type _kdf: DataFrame :ivar _scol: Spark Column instance :type _scol: pyspark.Column See Also -------- Index : A single-level Index. 
Examples -------- >>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index # doctest: +SKIP MultiIndex([(1, 4), (2, 5), (3, 6)], ) >>> ks.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]).index # doctest: +SKIP MultiIndex([('a', 'd'), ('b', 'e'), ('c', 'f')], ) """ def __init__(self, kdf: DataFrame): assert len(kdf._internal._index_map) > 1 scol = F.struct(kdf._internal.index_scols) data_columns = kdf._sdf.select(scol).columns internal = kdf._internal.copy(scol=scol, column_index=[(col, None) for col in data_columns], column_index_names=None) IndexOpsMixin.__init__(self, internal, kdf) def any(self, *args, **kwargs): raise TypeError("cannot perform any with this index type: MultiIndex") def all(self, *args, **kwargs): raise TypeError("cannot perform all with this index type: MultiIndex") @property def name(self) -> str: raise PandasNotImplementedError(class_name='pd.MultiIndex', property_name='name') @name.setter def name(self, name: str) -> None: raise PandasNotImplementedError(class_name='pd.MultiIndex', property_name='name') def to_pandas(self) -> pd.MultiIndex: """ Return a pandas MultiIndex. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats'], ... index=[list('abcd'), list('efgh')]) >>> df['dogs'].index.to_pandas() # doctest: +SKIP MultiIndex([('a', 'e'), ('b', 'f'), ('c', 'g'), ('d', 'h')], ) """ # TODO: We might need to handle internal state change. # So far, we don't have any functions to change the internal state of MultiIndex except for # series-like operations. In that case, it creates new Index object instead of MultiIndex. return self._kdf[[]]._to_internal_pandas().index toPandas = to_pandas def unique(self, level=None): raise PandasNotImplementedError(class_name='MultiIndex', method_name='unique') # TODO: add 'name' parameter after pd.MultiIndex.name is implemented def copy(self): """ Make a copy of this object. """ internal = self._kdf._internal.copy() result = MultiIndex(ks.DataFrame(internal)) return result def __getattr__(self, item: str) -> Any: if hasattr(_MissingPandasLikeMultiIndex, item): property_or_func = getattr(_MissingPandasLikeMultiIndex, item) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) raise AttributeError("'MultiIndex' object has no attribute '{}'".format(item)) def rename(self, name, inplace=False): raise NotImplementedError() def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return repr(self.to_pandas()) pindex = self._kdf.head(max_display_count + 1).index.to_pandas() pindex_length = len(pindex) repr_string = repr(pindex[:max_display_count]) if pindex_length > max_display_count: footer = '\nShowing only the first {}'.format(max_display_count) return repr_string + footer return repr_string def __iter__(self): return _MissingPandasLikeMultiIndex.__iter__(self)
1
12,580
I would remove this line. Otherwise looks fine cc @ueshin
databricks-koalas
py
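For illustration, here is the name-wrapping logic from the patch above in isolation (a minimal sketch in plain Python with no koalas dependency; the wrap_names helper and the sample values are hypothetical). The patched isinstance(name, (tuple, type(None))) check leaves None names untouched, whereas the original check would wrap them into (None,):

    def wrap_names(names, keep_none=True):
        if keep_none:
            # Patched behaviour: tuples and None pass through, everything else is wrapped.
            return [name if isinstance(name, (tuple, type(None))) else (name,) for name in names]
        # Original behaviour: only tuples pass through, so None becomes (None,).
        return [name if isinstance(name, tuple) else (name,) for name in names]

    print(wrap_names(['idx', None, ('a', 'b')]))                   # [('idx',), None, ('a', 'b')]
    print(wrap_names(['idx', None, ('a', 'b')], keep_none=False))  # [('idx',), (None,), ('a', 'b')]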
@@ -50,7 +50,7 @@ TEST (ledger_walker, genesis_account_longer) nano::ledger_walker ledger_walker{ node->ledger }; EXPECT_TRUE (ledger_walker.walked_blocks.empty ()); - EXPECT_EQ (1, ledger_walker.walked_blocks.bucket_count ()); + EXPECT_LE (ledger_walker.walked_blocks.bucket_count (), 1); EXPECT_TRUE (ledger_walker.blocks_to_walk.empty ()); const auto get_number_of_walked_blocks = [&ledger_walker] (const auto & start_block_hash) {
1
#include <nano/node/ledger_walker.hpp> #include <nano/test_common/system.hpp> #include <nano/test_common/testutil.hpp> #include <gtest/gtest.h> #include <numeric> // TODO: keep this until diskhash builds fine on Windows #ifndef _WIN32 using namespace std::chrono_literals; TEST (ledger_walker, genesis_block) { nano::system system{}; const auto node = system.add_node (); nano::ledger_walker ledger_walker{ node->ledger }; std::size_t walked_blocks_count = 0; ledger_walker.walk_backward (nano::genesis_hash, [&] (const auto & block) { ++walked_blocks_count; EXPECT_EQ (block->hash (), nano::genesis_hash); }); EXPECT_EQ (walked_blocks_count, 1); walked_blocks_count = 0; ledger_walker.walk (nano::genesis_hash, [&] (const auto & block) { ++walked_blocks_count; EXPECT_EQ (block->hash (), nano::genesis_hash); }); EXPECT_EQ (walked_blocks_count, 1); } namespace nano { TEST (ledger_walker, genesis_account_longer) { nano::system system{}; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = true; node_config.receive_minimum = 1; const auto node = system.add_node (node_config); nano::ledger_walker ledger_walker{ node->ledger }; EXPECT_TRUE (ledger_walker.walked_blocks.empty ()); EXPECT_EQ (1, ledger_walker.walked_blocks.bucket_count ()); EXPECT_TRUE (ledger_walker.blocks_to_walk.empty ()); const auto get_number_of_walked_blocks = [&ledger_walker] (const auto & start_block_hash) { std::size_t walked_blocks_count = 0; ledger_walker.walk_backward (start_block_hash, [&] (const auto & block) { ++walked_blocks_count; }); return walked_blocks_count; }; const auto transaction = node->ledger.store.tx_begin_read (); nano::account_info genesis_account_info{}; ASSERT_FALSE (node->ledger.store.account.get (transaction, nano::nano_dev_account, genesis_account_info)); EXPECT_EQ (get_number_of_walked_blocks (genesis_account_info.open_block), 1); EXPECT_EQ (get_number_of_walked_blocks (genesis_account_info.head), 1); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); for (auto itr = 1; itr <= 5; ++itr) { const auto send = system.wallet (0)->send_action (nano::dev_genesis_key.pub, nano::dev_genesis_key.pub, 1); ASSERT_TRUE (send); EXPECT_EQ (get_number_of_walked_blocks (send->hash ()), 1 + itr * 2 - 1); ASSERT_TIMELY (3s, 1 + itr * 2 == node->ledger.cache.cemented_count); ASSERT_FALSE (node->ledger.store.account.get (transaction, nano::nano_dev_account, genesis_account_info)); // TODO: check issue with account head // EXPECT_EQ(get_number_of_walked_blocks (genesis_account_info.head), 1 + itr * 2); } EXPECT_TRUE (ledger_walker.walked_blocks.empty ()); EXPECT_EQ (1, ledger_walker.walked_blocks.bucket_count ()); EXPECT_TRUE (ledger_walker.blocks_to_walk.empty ()); } } TEST (ledger_walker, cross_account) { nano::system system{}; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = true; node_config.receive_minimum = 1; const auto node = system.add_node (node_config); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_TRUE (system.wallet (0)->send_action (nano::dev_genesis_key.pub, nano::dev_genesis_key.pub, 1)); ASSERT_TIMELY (3s, 3 == node->ledger.cache.cemented_count); nano::keypair key{}; system.wallet (0)->insert_adhoc (key.prv); ASSERT_TRUE (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key.pub, 1)); ASSERT_TIMELY (3s, 5 == node->ledger.cache.cemented_count); const auto transaction = node->ledger.store.tx_begin_read (); nano::account_info account_info{}; ASSERT_FALSE 
(node->ledger.store.account.get (transaction, key.pub, account_info)); // TODO: check issue with account head // const auto first = node->ledger.store.block.get_no_sideband(transaction, account_info.head); // const auto second = node->ledger.store.block.get_no_sideband(transaction, first->previous()); // const auto third = node->ledger.store.block.get_no_sideband(transaction, second->previous()); // const auto fourth = node->ledger.store.block.get_no_sideband(transaction, third->previous()); // const auto fifth = node->ledger.store.block.get_no_sideband(transaction, fourth->previous()); // // const auto expected_blocks_to_walk = { first, second, third, fourth, fifth }; // auto expected_blocks_to_walk_itr = expected_blocks_to_walk.begin(); // // nano::ledger_walker ledger_walker{ node->ledger }; // ledger_walker.walk_backward (account_info.block_count, [&] (const auto & block) { // if (expected_blocks_to_walk_itr == expected_blocks_to_walk.end()) // { // EXPECT_TRUE(false); // return false; // } // // EXPECT_EQ((*expected_blocks_to_walk_itr++)->hash(), block->hash()); // return true; // }); // // EXPECT_EQ(expected_blocks_to_walk_itr, expected_blocks_to_walk.end()); } TEST (ledger_walker, ladder_geometry) { nano::system system{}; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = true; node_config.receive_minimum = 1; const auto node = system.add_node (node_config); std::array<nano::keypair, 3> keys{}; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); for (auto itr = 0; itr != keys.size (); ++itr) { system.wallet (0)->insert_adhoc (keys[itr].prv); const auto block = system.wallet (0)->send_action (nano::dev_genesis_key.pub, keys[itr].pub, 1000); ASSERT_TIMELY (3s, 1 + (itr + 1) * 2 == node->ledger.cache.cemented_count); } std::vector<nano::uint128_t> amounts_to_send (10); std::iota (amounts_to_send.begin (), amounts_to_send.end (), 1); const nano::account * last_destination{}; for (auto itr = 0; itr != amounts_to_send.size (); ++itr) { const auto source_index = itr % keys.size (); const auto destination_index = (source_index + 1) % keys.size (); last_destination = &keys[destination_index].pub; const auto send = system.wallet (0)->send_action (keys[source_index].pub, keys[destination_index].pub, amounts_to_send[itr]); ASSERT_TRUE (send); ASSERT_TIMELY (3s, 1 + keys.size () * 2 + (itr + 1) * 2 == node->ledger.cache.cemented_count); } ASSERT_TRUE (last_destination); const auto transaction = node->ledger.store.tx_begin_read (); nano::account_info last_destination_info{}; const auto last_destination_read_error = node->ledger.store.account.get (transaction, *last_destination, last_destination_info); ASSERT_FALSE (last_destination_read_error); // This is how we expect chains to look like (for 3 accounts and 10 amounts to be sent) // k1: 1000 SEND 3 SEND 6 SEND 9 SEND // k2: 1000 1 SEND 4 SEND 7 SEND 10 // k3: 1000 2 SEND 5 SEND 8 SEND std::vector<nano::uint128_t> amounts_expected_backwards{ 10, 9, 8, 5, 4, 3, 1000, 1, 1000, 2, 1000, 6, 7 }; auto amounts_expected_backwards_itr = amounts_expected_backwards.cbegin (); nano::ledger_walker ledger_walker{ node->ledger }; ledger_walker.walk_backward (last_destination_info.head, [&] (const auto & block) { if (block->sideband ().details.is_receive) { nano::amount previous_balance{}; if (!block->previous ().is_zero ()) { const auto previous_block = node->ledger.store.block.get_no_sideband (transaction, block->previous ()); previous_balance = previous_block->balance (); } EXPECT_EQ 
(*amounts_expected_backwards_itr++, block->balance ().number () - previous_balance.number ()); } }); EXPECT_EQ (amounts_expected_backwards_itr, amounts_expected_backwards.cend ()); auto amounts_expected_itr = amounts_expected_backwards.crbegin (); ledger_walker.walk (last_destination_info.head, [&] (const auto & block) { if (block->sideband ().details.is_receive) { nano::amount previous_balance{}; if (!block->previous ().is_zero ()) { const auto previous_block = node->ledger.store.block.get_no_sideband (transaction, block->previous ()); previous_balance = previous_block->balance (); } EXPECT_EQ (*amounts_expected_itr++, block->balance ().number () - previous_balance.number ()); } }); EXPECT_EQ (amounts_expected_itr, amounts_expected_backwards.crend ()); } #endif // _WIN32 -- TODO: keep this until diskhash builds fine on Windows
1
16,806
Would it be better to remove that line altogether if it is not directly relevant?
nanocurrency-nano-node
cpp
@@ -693,6 +693,13 @@ signal_arch_init(void) * compiler from optimizing it away. * XXX i#641, i#639: this breaks transparency to some extent until the * app uses fpu/xmm but we live with it. + * Given a security vulnerability and its mitigations, executing a fpu/xmm + * operation here might not be necessary anymore. The vulnerabilty, published on + * June 13, 2018, is CVE-2018-3665: "Lazy FPU Restore" schemes are vulnerable to + * the FPU state information leakage issue. The kernel-based mitigation was to + * automatically default to (safe) "eager" floating point register restore. In + * this mode FPU state is saved and restored for every task/context switch + * regardless of whether the current process invokes FPU instructions or not. */ __asm__ __volatile__("movd %%xmm0, %0" : "=g"(rc)); memset(&act, 0, sizeof(act));
1
/* ********************************************************** * Copyright (c) 2011-2019 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * signal_linux_x86.c - Linux and X86 specific signal code */ #include "signal_private.h" /* pulls in globals.h for us, in right order */ #ifndef LINUX # error Linux-only #endif #ifndef X86 # error X86-only #endif #include "arch.h" /* We have to dynamically size kernel_xstate_t to account for kernel changes * over time. 
*/ static size_t xstate_size; static bool xstate_has_extra_fields; #define XSTATE_QUERY_SIG SIGILL /**** floating point support ********************************************/ /* The following code is based on routines in * /usr/src/linux/arch/i386/kernel/i387.c * and definitions in * /usr/src/linux/include/asm-i386/processor.h * /usr/src/linux/include/asm-i386/i387.h */ struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; /* note that fxsave requires that i387_fxsave_struct be aligned on * a 16-byte boundary */ struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; #ifdef X64 long rip; long rdp; int mxcsr; int mxcsr_mask; int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ int xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ int padding[24]; #else long fip; long fcs; long foo; long fos; long mxcsr; long reserved; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; #endif } __attribute__((aligned(16))); union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; }; #ifndef X64 /* For 32-bit if we use fxsave we need to convert it to the kernel's struct. * For 64-bit the kernel's struct is identical to the fxsave format. */ static uint twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) { kernel_fpxreg_t *st = NULL; uint twd = (uint)fxsave->twd; uint tag; uint ret = 0xffff0000; int i; for (i = 0; i < 8; i++) { if (TEST(0x1, twd)) { st = (kernel_fpxreg_t *)&fxsave->st_space[i * 4]; switch (st->exponent & 0x7fff) { case 0x7fff: tag = 2; /* Special */ break; case 0x0000: if (st->significand[0] == 0 && st->significand[1] == 0 && st->significand[2] == 0 && st->significand[3] == 0) { tag = 1; /* Zero */ } else { tag = 2; /* Special */ } break; default: if (TEST(0x8000, st->significand[3])) { tag = 0; /* Valid */ } else { tag = 2; /* Special */ } break; } } else { tag = 3; /* Empty */ } ret |= (tag << (2 * i)); twd = twd >> 1; } return ret; } static void convert_fxsave_to_fpstate(kernel_fpstate_t *fpstate, struct i387_fxsave_struct *fxsave) { int i; fpstate->cw = (uint)fxsave->cwd | 0xffff0000; fpstate->sw = (uint)fxsave->swd | 0xffff0000; fpstate->tag = twd_fxsr_to_i387(fxsave); fpstate->ipoff = fxsave->fip; fpstate->cssel = fxsave->fcs | ((uint)fxsave->fop << 16); fpstate->dataoff = fxsave->foo; fpstate->datasel = fxsave->fos; for (i = 0; i < 8; i++) { memcpy(&fpstate->_st[i], &fxsave->st_space[i * 4], sizeof(fpstate->_st[i])); } fpstate->status = fxsave->swd; fpstate->magic = X86_FXSR_MAGIC; memcpy(&fpstate->_fxsr_env[0], fxsave, offsetof(struct i387_fxsave_struct, xmm_space)); } #endif /* !X64 */ static void save_xmm(dcontext_t *dcontext, sigframe_rt_t *frame) { /* The app's xmm registers may be saved away in priv_mcontext_t, in which * case we need to copy those values instead of using what was in * the physical xmm registers. * Because of this, we can't just execute "xsave". We still need to * execute xgetbv though. Xsave is very expensive so not worth doing * when xgetbv is all we need, so we avoid it unless there are extra fields. 
*/ int i; sigcontext_t *sc = get_sigcontext_from_rt_frame(frame); kernel_xstate_t *xstate = (kernel_xstate_t *)sc->fpstate; if (!preserve_xmm_caller_saved()) return; if (xstate_has_extra_fields) { /* Fill in the extra fields first and then clobber xmm+ymm below. * We assume that DR's code does not touch this extra state. */ /* A processor w/o xsave but w/ extra xstate fields should not exist. */ ASSERT(proc_has_feature(FEATURE_XSAVE)); /* XXX i#1312: use xsaveopt if available (need to add FEATURE_XSAVEOPT) */ #ifdef X64 ASSERT(ALIGNED(xstate, AVX_ALIGNMENT)); /* Some assemblers, including on Travis, don't know "xsave64", so we * have to use raw bytes for: * 48 0f ae 21 xsave64 (%rcx) * We only enable the x87 state component. The rest of the user state components * get copied below from priv_mcontext_t. */ asm volatile("mov $0x0, %%edx\n\t" "mov $0x1, %%eax\n\t" "mov %0, %%rcx\n\t" ".byte 0x48\n\t" ".byte 0x0f\n\t" ".byte 0xae\n\t" ".byte 0x21\n" : : "m"(xstate) : "eax", "edx", "rcx", "memory"); #else # if DISABLED_ISSUE_3256 /* FIXME i#3256: DR's kernel_fpstate_t includes the fsave 112 bytes at the * top. We need to skip them to reach the xsave area at the _fxsr_env field. * However, that requires aligning that instead of the kernel_fpstate_t start * itself in sigpending_t and the frame we make on the app stack. An * alternative here is to copy into a temp buffer but that seems wasteful. * For now we skip the xsave, which seems safer than clobbering the wrong * fields, but is also buggy and can cause app data corruption. */ byte *xsave_start = (byte *)(&xstate->fpstate._fxsr_env[0]); ASSERT(ALIGNED(xsave_start, AVX_ALIGNMENT)); /* We only enable the x87 state component. The rest of the user state components * gets copied below from priv_mcontext_t. * FIXME i#1312: it is unclear if and how the components are arranged in * 32-bit mode by the kernel. In fact, if we enable more state components here * than this, we get a crash in linux.sigcontext. This needs clarification about * what the kernel does for 32-bit with the extended xsave area. * UPDATE from i#3256 analysis: Was this due to the incorrect xsave target? */ asm volatile("mov $0x0, %%edx\n\t" "mov $0x1, %%eax\n\t" "mov %0, %%ecx\n\t" ".byte 0x0f\n\t" ".byte 0xae\n\t" ".byte 0x21\n" : : "m"(xsave_start) : "eax", "edx", "ecx", "memory"); # endif #endif } if (YMM_ENABLED()) { /* all ymm regs are in our mcontext. the only other thing * in xstate is the xgetbv. 
*/ uint bv_high, bv_low; dr_xgetbv(&bv_high, &bv_low); xstate->xstate_hdr.xstate_bv = (((uint64)bv_high) << 32) | bv_low; } for (i = 0; i < proc_num_simd_sse_avx_saved(); i++) { /* we assume no padding */ #ifdef X64 /* __u32 xmm_space[64] */ memcpy(&sc->fpstate->xmm_space[i * 4], &get_mcontext(dcontext)->simd[i], XMM_REG_SIZE); if (YMM_ENABLED()) { /* i#637: ymm top halves are inside kernel_xstate_t */ memcpy(&xstate->ymmh.ymmh_space[i * 4], ((void *)&get_mcontext(dcontext)->simd[i]) + YMMH_REG_SIZE, YMMH_REG_SIZE); } #else memcpy(&sc->fpstate->_xmm[i], &get_mcontext(dcontext)->simd[i], XMM_REG_SIZE); if (YMM_ENABLED()) { /* i#637: ymm top halves are inside kernel_xstate_t */ memcpy(&xstate->ymmh.ymmh_space[i * 4], ((void *)&get_mcontext(dcontext)->simd[i]) + YMMH_REG_SIZE, YMMH_REG_SIZE); } #endif #ifdef X64 if (ZMM_ENABLED()) { memcpy((byte *)xstate + proc_xstate_area_zmm_hi256_offs() + i * ZMMH_REG_SIZE, ((void *)&get_mcontext(dcontext)->simd[i]) + ZMMH_REG_SIZE, ZMMH_REG_SIZE); ASSERT(proc_num_simd_sse_avx_saved() == proc_num_simd_registers() - proc_num_simd_sse_avx_saved()); memcpy((byte *)xstate + proc_xstate_area_hi16_zmm_offs() + i * ZMM_REG_SIZE, ((void *)&get_mcontext(dcontext) ->simd[i + proc_num_simd_sse_avx_saved()]), ZMM_REG_SIZE); } #else /* FIXME i#1312: it is unclear if and how the components are arranged in * 32-bit mode by the kernel. */ #endif } #ifdef X64 if (ZMM_ENABLED()) { for (i = 0; i < proc_num_opmask_registers(); i++) { memcpy((byte *)xstate + proc_xstate_area_kmask_offs() + i * OPMASK_AVX512BW_REG_SIZE, ((void *)&get_mcontext(dcontext)->opmask[i]), OPMASK_AVX512BW_REG_SIZE); } } #endif } /* We can't tell whether the app has used fpstate yet so we preserve every time * (i#641 covers optimizing that) */ void save_fpstate(dcontext_t *dcontext, sigframe_rt_t *frame) { /* The compiler may not be able to properly align stack variables even with * __attribute__((aligned()). We maintain this array to enforce alignment. */ char align[sizeof(union i387_union) + 16] __attribute__((aligned(16))); union i387_union *temp = (union i387_union *)((((ptr_uint_t)align) + 16) & ((ptr_uint_t)-16)); sigcontext_t *sc = get_sigcontext_from_rt_frame(frame); LOG(THREAD, LOG_ASYNCH, 3, "save_fpstate\n"); if (sc->fpstate == NULL) { /* Nothing to do: there was no fpstate to save at the time the kernel * gave us this frame. * It's possible that by the time we deliver the signal * there is some state: but it's up to the caller to set up room * for fpstate and point at it in that case. 
*/ return; } else { LOG(THREAD, LOG_ASYNCH, 3, "ptr=" PFX "\n", sc->fpstate); } if (proc_has_feature(FEATURE_FXSR)) { LOG(THREAD, LOG_ASYNCH, 3, "\ttemp=" PFX "\n", temp); #ifdef X64 /* this is "unlazy_fpu" */ /* fxsaveq is only supported with gas >= 2.16 but we have that */ asm volatile("fxsaveq %0 ; fnclex" : "=m"(temp->fxsave)); /* now convert into kernel_fpstate_t form */ ASSERT(sizeof(kernel_fpstate_t) == sizeof(struct i387_fxsave_struct)); memcpy(sc->fpstate, &temp->fxsave, offsetof(struct i387_fxsave_struct, xmm_space)); #else /* this is "unlazy_fpu" */ asm volatile("fxsave %0 ; fnclex" : "=m"(temp->fxsave)); /* now convert into kernel_fpstate_t form */ convert_fxsave_to_fpstate(sc->fpstate, &temp->fxsave); #endif } else { /* FIXME NYI: need to convert to fxsave format for sc->fpstate */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* this is "unlazy_fpu" */ asm volatile("fnsave %0 ; fwait" : "=m"(temp->fsave)); /* now convert into kernel_fpstate_t form */ temp->fsave.status = temp->fsave.swd; memcpy(sc->fpstate, &temp->fsave, sizeof(struct i387_fsave_struct)); } save_xmm(dcontext, frame); } #ifdef DEBUG static void dump_fpstate(dcontext_t *dcontext, kernel_fpstate_t *fp) { int i, j; # ifdef X64 LOG(THREAD, LOG_ASYNCH, 1, "\tcwd=" PFX "\n", fp->cwd); LOG(THREAD, LOG_ASYNCH, 1, "\tswd=" PFX "\n", fp->swd); LOG(THREAD, LOG_ASYNCH, 1, "\ttwd=" PFX "\n", fp->twd); LOG(THREAD, LOG_ASYNCH, 1, "\tfop=" PFX "\n", fp->fop); LOG(THREAD, LOG_ASYNCH, 1, "\trip=" PFX "\n", fp->rip); LOG(THREAD, LOG_ASYNCH, 1, "\trdp=" PFX "\n", fp->rdp); LOG(THREAD, LOG_ASYNCH, 1, "\tmxcsr=" PFX "\n", fp->mxcsr); LOG(THREAD, LOG_ASYNCH, 1, "\tmxcsr_mask=" PFX "\n", fp->mxcsr_mask); for (i = 0; i < 8; i++) { LOG(THREAD, LOG_ASYNCH, 1, "\tst%d = 0x", i); for (j = 0; j < 4; j++) { LOG(THREAD, LOG_ASYNCH, 1, "%08x", fp->st_space[i * 4 + j]); } LOG(THREAD, LOG_ASYNCH, 1, "\n"); } for (i = 0; i < 16; i++) { LOG(THREAD, LOG_ASYNCH, 1, "\txmm%d = 0x", i); for (j = 0; j < 4; j++) { LOG(THREAD, LOG_ASYNCH, 1, "%08x", fp->xmm_space[i * 4 + j]); } LOG(THREAD, LOG_ASYNCH, 1, "\n"); } # else LOG(THREAD, LOG_ASYNCH, 1, "\tcw=" PFX "\n", fp->cw); LOG(THREAD, LOG_ASYNCH, 1, "\tsw=" PFX "\n", fp->sw); LOG(THREAD, LOG_ASYNCH, 1, "\ttag=" PFX "\n", fp->tag); LOG(THREAD, LOG_ASYNCH, 1, "\tipoff=" PFX "\n", fp->ipoff); LOG(THREAD, LOG_ASYNCH, 1, "\tcssel=" PFX "\n", fp->cssel); LOG(THREAD, LOG_ASYNCH, 1, "\tdataoff=" PFX "\n", fp->dataoff); LOG(THREAD, LOG_ASYNCH, 1, "\tdatasel=" PFX "\n", fp->datasel); for (i = 0; i < 8; i++) { LOG(THREAD, LOG_ASYNCH, 1, "\tst%d = ", i); for (j = 0; j < 4; j++) LOG(THREAD, LOG_ASYNCH, 1, "%04x ", fp->_st[i].significand[j]); LOG(THREAD, LOG_ASYNCH, 1, "^ %04x\n", fp->_st[i].exponent); } LOG(THREAD, LOG_ASYNCH, 1, "\tstatus=0x%04x\n", fp->status); LOG(THREAD, LOG_ASYNCH, 1, "\tmagic=0x%04x\n", fp->magic); /* FXSR FPU environment */ for (i = 0; i < 6; i++) LOG(THREAD, LOG_ASYNCH, 1, "\tfxsr_env[%d] = " PFX "\n", i, fp->_fxsr_env[i]); LOG(THREAD, LOG_ASYNCH, 1, "\tmxcsr=" PFX "\n", fp->mxcsr); LOG(THREAD, LOG_ASYNCH, 1, "\treserved=" PFX "\n", fp->reserved); for (i = 0; i < 8; i++) { LOG(THREAD, LOG_ASYNCH, 1, "\tfxsr_st%d = ", i); for (j = 0; j < 4; j++) LOG(THREAD, LOG_ASYNCH, 1, "%04x ", fp->_fxsr_st[i].significand[j]); LOG(THREAD, LOG_ASYNCH, 1, "^ %04x\n", fp->_fxsr_st[i].exponent); /* ignore padding */ } for (i = 0; i < 8; i++) { LOG(THREAD, LOG_ASYNCH, 1, "\txmm%d = ", i); for (j = 0; j < 4; j++) LOG(THREAD, LOG_ASYNCH, 1, "%08x ", fp->_xmm[i].element[j]); LOG(THREAD, LOG_ASYNCH, 1, "\n"); } # endif /* 
Ignore padding. */ if (YMM_ENABLED()) { kernel_xstate_t *xstate = (kernel_xstate_t *)fp; if (fp->sw_reserved.magic1 == FP_XSTATE_MAGIC1) { /* i#718: for 32-bit app on 64-bit OS, the xstate_size in sw_reserved * is obtained via cpuid, which is the xstate size of 64-bit arch. */ ASSERT(fp->sw_reserved.extended_size >= sizeof(*xstate)); ASSERT(TEST(XCR0_AVX, fp->sw_reserved.xstate_bv)); LOG(THREAD, LOG_ASYNCH, 1, "\txstate_bv = 0x" HEX64_FORMAT_STRING "\n", xstate->xstate_hdr.xstate_bv); for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { LOG(THREAD, LOG_ASYNCH, 1, "\tymmh%d = ", i); for (j = 0; j < 4; j++) { LOG(THREAD, LOG_ASYNCH, 1, "%08x", xstate->ymmh.ymmh_space[i * 4 + j]); } LOG(THREAD, LOG_ASYNCH, 1, "\n"); } } } /* XXX i#1312: Dumping AVX-512 extended registers missing yet. */ } void dump_sigcontext(dcontext_t *dcontext, sigcontext_t *sc) { LOG(THREAD, LOG_ASYNCH, 1, "\tgs=0x%04x" IF_NOT_X64(", __gsh=0x%04x") "\n", sc->gs _IF_NOT_X64(sc->__gsh)); LOG(THREAD, LOG_ASYNCH, 1, "\tfs=0x%04x" IF_NOT_X64(", __fsh=0x%04x") "\n", sc->fs _IF_NOT_X64(sc->__fsh)); # ifndef X64 LOG(THREAD, LOG_ASYNCH, 1, "\tes=0x%04x, __esh=0x%04x\n", sc->es, sc->__esh); LOG(THREAD, LOG_ASYNCH, 1, "\tds=0x%04x, __dsh=0x%04x\n", sc->ds, sc->__dsh); # endif LOG(THREAD, LOG_ASYNCH, 1, "\txdi=" PFX "\n", sc->SC_XDI); LOG(THREAD, LOG_ASYNCH, 1, "\txsi=" PFX "\n", sc->SC_XSI); LOG(THREAD, LOG_ASYNCH, 1, "\txbp=" PFX "\n", sc->SC_XBP); LOG(THREAD, LOG_ASYNCH, 1, "\txsp=" PFX "\n", sc->SC_XSP); LOG(THREAD, LOG_ASYNCH, 1, "\txbx=" PFX "\n", sc->SC_XBX); LOG(THREAD, LOG_ASYNCH, 1, "\txdx=" PFX "\n", sc->SC_XDX); LOG(THREAD, LOG_ASYNCH, 1, "\txcx=" PFX "\n", sc->SC_XCX); LOG(THREAD, LOG_ASYNCH, 1, "\txax=" PFX "\n", sc->SC_XAX); # ifdef X64 LOG(THREAD, LOG_ASYNCH, 1, "\t r8=" PFX "\n", sc->r8); LOG(THREAD, LOG_ASYNCH, 1, "\t r9=" PFX "\n", sc->r9); LOG(THREAD, LOG_ASYNCH, 1, "\tr10=" PFX "\n", sc->r10); LOG(THREAD, LOG_ASYNCH, 1, "\tr11=" PFX "\n", sc->r11); LOG(THREAD, LOG_ASYNCH, 1, "\tr12=" PFX "\n", sc->r12); LOG(THREAD, LOG_ASYNCH, 1, "\tr13=" PFX "\n", sc->r13); LOG(THREAD, LOG_ASYNCH, 1, "\tr14=" PFX "\n", sc->r14); LOG(THREAD, LOG_ASYNCH, 1, "\tr15=" PFX "\n", sc->r15); # endif LOG(THREAD, LOG_ASYNCH, 1, "\ttrapno=" PFX "\n", sc->trapno); LOG(THREAD, LOG_ASYNCH, 1, "\terr=" PFX "\n", sc->err); LOG(THREAD, LOG_ASYNCH, 1, "\txip=" PFX "\n", sc->SC_XIP); LOG(THREAD, LOG_ASYNCH, 1, "\tcs=0x%04x" IF_NOT_X64(", __esh=0x%04x") "\n", sc->cs _IF_NOT_X64(sc->__csh)); LOG(THREAD, LOG_ASYNCH, 1, "\teflags=" PFX "\n", sc->SC_XFLAGS); # ifndef X64 LOG(THREAD, LOG_ASYNCH, 1, "\tesp_at_signal=" PFX "\n", sc->esp_at_signal); LOG(THREAD, LOG_ASYNCH, 1, "\tss=0x%04x, __ssh=0x%04x\n", sc->ss, sc->__ssh); # endif if (sc->fpstate == NULL) LOG(THREAD, LOG_ASYNCH, 1, "\tfpstate=<NULL>\n"); else dump_fpstate(dcontext, sc->fpstate); LOG(THREAD, LOG_ASYNCH, 1, "\toldmask=" PFX "\n", sc->oldmask); LOG(THREAD, LOG_ASYNCH, 1, "\tcr2=" PFX "\n", sc->cr2); } #endif /* DEBUG */ void sigcontext_to_mcontext_simd(priv_mcontext_t *mc, sig_full_cxt_t *sc_full) { sigcontext_t *sc = sc_full->sc; if (sc->fpstate != NULL) { int i; for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy(&mc->simd[i], &sc->fpstate->IF_X64_ELSE(xmm_space[i * 4], _xmm[i]), XMM_REG_SIZE); } if (YMM_ENABLED()) { kernel_xstate_t *xstate = (kernel_xstate_t *)sc->fpstate; if (sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1) { /* i#718: for 32-bit app on 64-bit OS, the xstate_size in sw_reserved * is obtained via cpuid, which is the xstate size of 64-bit arch. 
*/ ASSERT(sc->fpstate->sw_reserved.extended_size >= sizeof(*xstate)); ASSERT(TEST(XCR0_AVX, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy(&mc->simd[i].u32[4], &xstate->ymmh.ymmh_space[i * 4], YMMH_REG_SIZE); } } } #ifdef X64 if (ZMM_ENABLED()) { kernel_xstate_t *xstate = (kernel_xstate_t *)sc->fpstate; if (sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1) { /* The following three XCR0 bits should have been checked already * in ZMM_ENABLED(). */ ASSERT(TEST(XCR0_ZMM_HI256, sc->fpstate->sw_reserved.xstate_bv)); ASSERT(TEST(XCR0_HI16_ZMM, sc->fpstate->sw_reserved.xstate_bv)); ASSERT(TEST(XCR0_OPMASK, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy(&mc->simd[i].u32[8], (byte *)xstate + proc_xstate_area_zmm_hi256_offs() + i * ZMMH_REG_SIZE, ZMMH_REG_SIZE); ASSERT(proc_num_simd_sse_avx_saved() == proc_num_simd_registers() - proc_num_simd_sse_avx_saved()); memcpy(&mc->simd[i + proc_num_simd_sse_avx_saved()], (byte *)xstate + proc_xstate_area_hi16_zmm_offs() + i * ZMM_REG_SIZE, ZMM_REG_SIZE); } ASSERT(TEST(XCR0_OPMASK, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_opmask_registers(); i++) { memcpy(&mc->opmask[i], (byte *)xstate + proc_xstate_area_kmask_offs() + i * OPMASK_AVX512BW_REG_SIZE, OPMASK_AVX512BW_REG_SIZE); } } } #else /* FIXME i#1312: it is unclear if and how the components are arranged in * 32-bit mode by the kernel. */ #endif } } void mcontext_to_sigcontext_simd(sig_full_cxt_t *sc_full, priv_mcontext_t *mc) { sigcontext_t *sc = sc_full->sc; if (sc->fpstate != NULL) { int i; for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy(&sc->fpstate->IF_X64_ELSE(xmm_space[i * 4], _xmm[i]), &mc->simd[i], XMM_REG_SIZE); } if (YMM_ENABLED()) { kernel_xstate_t *xstate = (kernel_xstate_t *)sc->fpstate; if (sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1) { /* i#718: for 32-bit app on 64-bit OS, the xstate_size in sw_reserved * is obtained via cpuid, which is the xstate size of 64-bit arch. */ ASSERT(sc->fpstate->sw_reserved.extended_size >= sizeof(*xstate)); ASSERT(TEST(XCR0_AVX, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy(&xstate->ymmh.ymmh_space[i * 4], &mc->simd[i].u32[4], YMMH_REG_SIZE); } } } #ifdef X64 if (ZMM_ENABLED()) { kernel_xstate_t *xstate = (kernel_xstate_t *)sc->fpstate; if (sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1) { /* The following three XCR0 bits should have been checked already * in ZMM_ENABLED(). */ ASSERT(TEST(XCR0_ZMM_HI256, sc->fpstate->sw_reserved.xstate_bv)); ASSERT(TEST(XCR0_HI16_ZMM, sc->fpstate->sw_reserved.xstate_bv)); ASSERT(TEST(XCR0_OPMASK, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_simd_sse_avx_registers(); i++) { memcpy((byte *)xstate + proc_xstate_area_zmm_hi256_offs() + i * ZMMH_REG_SIZE, &mc->simd[i].u32[8], ZMMH_REG_SIZE); ASSERT(proc_num_simd_sse_avx_registers() == proc_num_simd_registers() - proc_num_simd_sse_avx_registers()); memcpy((byte *)xstate + proc_xstate_area_hi16_zmm_offs() + i * ZMM_REG_SIZE, &mc->simd[i + proc_num_simd_sse_avx_registers()], ZMM_REG_SIZE); } ASSERT(TEST(XCR0_OPMASK, sc->fpstate->sw_reserved.xstate_bv)); for (i = 0; i < proc_num_opmask_registers(); i++) { memcpy((byte *)xstate + proc_xstate_area_kmask_offs() + i * OPMASK_AVX512BW_REG_SIZE, &mc->opmask[i], OPMASK_AVX512BW_REG_SIZE); } } } #else /* FIXME i#1312: it is unclear if and how the components are arranged in * 32-bit mode by the kernel. 
*/ #endif } } size_t signal_frame_extra_size(bool include_alignment) { /* Extra space needed to put the signal frame on the app stack. We include the * size of the extra padding potentially needed to align these structs. We * assume the stack pointer is 4-aligned already, so we over estimate padding * size by the alignment minus 4. */ ASSERT(YMM_ENABLED() || !ZMM_ENABLED()); size_t size = YMM_ENABLED() ? xstate_size : sizeof(kernel_fpstate_t); if (include_alignment) size += (YMM_ENABLED() ? AVX_ALIGNMENT : FPSTATE_ALIGNMENT) - 4; return size; } /* To handle varying xstate sizes as kernels add more state over time, we query * the size by sending ourselves a signal at init time and reading what the * kernel saved. We assume that DR's own code does not touch this state, so * that we can update it to the app's latest at delivery time by executing * xsave in save_xmm(). * * XXX: If the kernel ever does lazy state saving for any part of the new state * and that affects the size, like it does with fpstate, this initial signal * state may not match later state. Currently it seems to be all-or-nothing. */ static void xstate_query_signal_handler(int sig, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt) { ASSERT_CURIOSITY(sig == XSTATE_QUERY_SIG); if (sig == XSTATE_QUERY_SIG) { sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); if (YMM_ENABLED() && sc->fpstate != NULL) { ASSERT_CURIOSITY(sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1); LOG(GLOBAL, LOG_ASYNCH, 1, "orig xstate size = " SZFMT "\n", xstate_size); if (sc->fpstate->sw_reserved.extended_size != xstate_size) { xstate_size = sc->fpstate->sw_reserved.extended_size; xstate_has_extra_fields = true; } LOG(GLOBAL, LOG_ASYNCH, 1, "new xstate size = " SZFMT "\n", xstate_size); } else { /* i#2438: we force-initialized xmm state in signal_arch_init(). * But, on WSL it's still NULL (i#1896) so we make this just a curiosity * until we've tackled signals on WSL. */ ASSERT_CURIOSITY(sc->fpstate != NULL); } } } void signal_arch_init(void) { xstate_size = sizeof(kernel_xstate_t) + FP_XSTATE_MAGIC2_SIZE; ASSERT(YMM_ENABLED() || !ZMM_ENABLED()); if (YMM_ENABLED() && !standalone_library /* avoid SIGILL for standalone */) { kernel_sigaction_t act, oldact; int rc; /* i#2438: it's possible that our init code to this point has not yet executed * fpu or xmm operations and that thus fpstate will be NULL. We force it * with an explicit xmm ref here. We mark it "asm volatile" to prevent the * compiler from optimizing it away. * XXX i#641, i#639: this breaks transparency to some extent until the * app uses fpu/xmm but we live with it. */ __asm__ __volatile__("movd %%xmm0, %0" : "=g"(rc)); memset(&act, 0, sizeof(act)); set_handler_sigact(&act, XSTATE_QUERY_SIG, (handler_t)xstate_query_signal_handler); rc = sigaction_syscall(XSTATE_QUERY_SIG, &act, &oldact); ASSERT(rc == 0); thread_signal(get_process_id(), get_sys_thread_id(), XSTATE_QUERY_SIG); rc = sigaction_syscall(XSTATE_QUERY_SIG, &oldact, NULL); ASSERT(rc == 0); } }
1
23,309
Does "FPU" here include XMM/YMM/ZMM SIMD and not just x87 FPU/MMX?
DynamoRIO-dynamorio
c
@@ -226,7 +226,7 @@ class TokValueFile(Token): return generators.FileGenerator(s) def spec(self): - return "<'%s'" % strutils.bytes_to_escaped_str(self.path) + return "<'%s'" % self.path TokValue = pp.MatchFirst(
1
import operator import os import abc import pyparsing as pp import six from six.moves import reduce from netlib import strutils from netlib import human from . import generators, exceptions class Settings(object): def __init__( self, is_client=False, staticdir=None, unconstrained_file_access=False, request_host=None, websocket_key=None, protocol=None, ): self.is_client = is_client self.staticdir = staticdir self.unconstrained_file_access = unconstrained_file_access self.request_host = request_host self.websocket_key = websocket_key # TODO: refactor this into the protocol self.protocol = protocol Sep = pp.Optional(pp.Literal(":")).suppress() v_integer = pp.Word(pp.nums)\ .setName("integer")\ .setParseAction(lambda toks: int(toks[0])) v_literal = pp.MatchFirst( [ pp.QuotedString( "\"", unquoteResults=True, multiline=True ), pp.QuotedString( "'", unquoteResults=True, multiline=True ), ] ) v_naked_literal = pp.MatchFirst( [ v_literal, pp.Word("".join(i for i in pp.printables if i not in ",:\n@\'\"")) ] ) class Token(object): """ A token in the specification language. Tokens are immutable. The token classes have no meaning in and of themselves, and are combined into Components and Actions to build the language. """ __metaclass__ = abc.ABCMeta @classmethod def expr(cls): # pragma: no cover """ A parse expression. """ return None @abc.abstractmethod def spec(self): # pragma: no cover """ A parseable specification for this token. """ return None @property def unique_name(self): """ Controls uniqueness constraints for tokens. No two tokens with the same name will be allowed. If no uniquness should be applied, this should be None. """ return self.__class__.__name__.lower() def resolve(self, settings_, msg_): """ Resolves this token to ready it for transmission. This means that the calculated offsets of actions are fixed. 
settings: a language.Settings instance msg: The containing message """ return self def __repr__(self): return self.spec() class _TokValueLiteral(Token): def __init__(self, val): self.val = strutils.escaped_str_to_bytes(val) def get_generator(self, settings_): return self.val def freeze(self, settings_): return self class TokValueLiteral(_TokValueLiteral): """ A literal with Python-style string escaping """ @classmethod def expr(cls): e = v_literal.copy() return e.setParseAction(cls.parseAction) @classmethod def parseAction(cls, x): v = cls(*x) return v def spec(self): inner = strutils.bytes_to_escaped_str(self.val) inner = inner.replace(r"\'", r"\x27") return "'" + inner + "'" class TokValueNakedLiteral(_TokValueLiteral): @classmethod def expr(cls): e = v_naked_literal.copy() return e.setParseAction(lambda x: cls(*x)) def spec(self): return strutils.bytes_to_escaped_str(self.val) class TokValueGenerate(Token): def __init__(self, usize, unit, datatype): if not unit: unit = "b" self.usize, self.unit, self.datatype = usize, unit, datatype def bytes(self): return self.usize * human.SIZE_UNITS[self.unit] def get_generator(self, settings_): return generators.RandomGenerator(self.datatype, self.bytes()) def freeze(self, settings): g = self.get_generator(settings) return TokValueLiteral(strutils.bytes_to_escaped_str(g[:])) @classmethod def expr(cls): e = pp.Literal("@").suppress() + v_integer u = reduce( operator.or_, [pp.Literal(i) for i in human.SIZE_UNITS.keys()] ).leaveWhitespace() e = e + pp.Optional(u, default=None) s = pp.Literal(",").suppress() s += reduce( operator.or_, [pp.Literal(i) for i in generators.DATATYPES.keys()] ) e += pp.Optional(s, default="bytes") return e.setParseAction(lambda x: cls(*x)) def spec(self): s = "@%s" % self.usize if self.unit != "b": s += self.unit if self.datatype != "bytes": s += ",%s" % self.datatype return s class TokValueFile(Token): def __init__(self, path): self.path = str(path) @classmethod def expr(cls): e = pp.Literal("<").suppress() e = e + v_naked_literal return e.setParseAction(lambda x: cls(*x)) def freeze(self, settings_): return self def get_generator(self, settings): if not settings.staticdir: raise exceptions.FileAccessDenied("File access disabled.") s = os.path.expanduser(self.path) s = os.path.normpath( os.path.abspath(os.path.join(settings.staticdir, s)) ) uf = settings.unconstrained_file_access if not uf and not s.startswith(settings.staticdir): raise exceptions.FileAccessDenied( "File access outside of configured directory" ) if not os.path.isfile(s): raise exceptions.FileAccessDenied("File not readable") return generators.FileGenerator(s) def spec(self): return "<'%s'" % strutils.bytes_to_escaped_str(self.path) TokValue = pp.MatchFirst( [ TokValueGenerate.expr(), TokValueFile.expr(), TokValueLiteral.expr() ] ) TokNakedValue = pp.MatchFirst( [ TokValueGenerate.expr(), TokValueFile.expr(), TokValueLiteral.expr(), TokValueNakedLiteral.expr(), ] ) TokOffset = pp.MatchFirst( [ v_integer, pp.Literal("r"), pp.Literal("a") ] ) class _Component(Token): """ A value component of the primary specification of an message. Components produce byte values desribe the bytes of the message. """ def values(self, settings): # pragma: no cover """ A sequence of values, which can either be strings or generators. """ pass def string(self, settings=None): """ A string representation of the object. """ return "".join(i[:] for i in self.values(settings or {})) class KeyValue(_Component): """ A key/value pair. 
cls.preamble: leader """ def __init__(self, key, value): self.key, self.value = key, value @classmethod def expr(cls): e = pp.Literal(cls.preamble).suppress() e += TokValue e += pp.Literal("=").suppress() e += TokValue return e.setParseAction(lambda x: cls(*x)) def spec(self): return "%s%s=%s" % (self.preamble, self.key.spec(), self.value.spec()) def freeze(self, settings): return self.__class__( self.key.freeze(settings), self.value.freeze(settings) ) class CaselessLiteral(_Component): """ A caseless token that can take only one value. """ def __init__(self, value): self.value = value @classmethod def expr(cls): spec = pp.CaselessLiteral(cls.TOK) spec = spec.setParseAction(lambda x: cls(*x)) return spec def values(self, settings): return self.TOK def spec(self): return self.TOK def freeze(self, settings_): return self class OptionsOrValue(_Component): """ Can be any of a specified set of options, or a value specifier. """ preamble = "" options = [] def __init__(self, value): # If it's a string, we were passed one of the options, so we lower-case # it to be canonical. The user can specify a different case by using a # string value literal. self.option_used = False if isinstance(value, six.string_types): for i in self.options: # Find the exact option value in a case-insensitive way if i.lower() == value.lower(): self.option_used = True value = TokValueLiteral(i) break self.value = value @classmethod def expr(cls): parts = [pp.CaselessLiteral(i) for i in cls.options] m = pp.MatchFirst(parts) spec = m | TokValue.copy() spec = spec.setParseAction(lambda x: cls(*x)) if cls.preamble: spec = pp.Literal(cls.preamble).suppress() + spec return spec def values(self, settings): return [ self.value.get_generator(settings) ] def spec(self): s = self.value.spec() if s[1:-1].lower() in self.options: s = s[1:-1].lower() return "%s%s" % (self.preamble, s) def freeze(self, settings): return self.__class__(self.value.freeze(settings)) class Integer(_Component): bounds = (None, None) preamble = "" def __init__(self, value): v = int(value) outofbounds = any([ self.bounds[0] is not None and v < self.bounds[0], self.bounds[1] is not None and v > self.bounds[1] ]) if outofbounds: raise exceptions.ParseException( "Integer value must be between %s and %s." % self.bounds, 0, 0 ) self.value = str(value) @classmethod def expr(cls): e = v_integer.copy() if cls.preamble: e = pp.Literal(cls.preamble).suppress() + e return e.setParseAction(lambda x: cls(*x)) def values(self, settings): return self.value def spec(self): return "%s%s" % (self.preamble, self.value) def freeze(self, settings_): return self class Value(_Component): """ A value component lead by an optional preamble. """ preamble = "" def __init__(self, value): self.value = value @classmethod def expr(cls): e = (TokValue | TokNakedValue) if cls.preamble: e = pp.Literal(cls.preamble).suppress() + e return e.setParseAction(lambda x: cls(*x)) def values(self, settings): return [self.value.get_generator(settings)] def spec(self): return "%s%s" % (self.preamble, self.value.spec()) def freeze(self, settings): return self.__class__(self.value.freeze(settings)) class FixedLengthValue(Value): """ A value component lead by an optional preamble. 
""" preamble = "" length = None def __init__(self, value): Value.__init__(self, value) lenguess = None try: lenguess = len(value.get_generator(Settings())) except exceptions.RenderError: pass # This check will fail if we know the length upfront if lenguess is not None and lenguess != self.length: raise exceptions.RenderError( "Invalid value length: '%s' is %s bytes, should be %s." % ( self.spec(), lenguess, self.length ) ) def values(self, settings): ret = Value.values(self, settings) l = sum(len(i) for i in ret) # This check will fail if we don't know the length upfront - i.e. for # file inputs if l != self.length: raise exceptions.RenderError( "Invalid value length: '%s' is %s bytes, should be %s." % ( self.spec(), l, self.length ) ) return ret class Boolean(_Component): """ A boolean flag. name = true -name = false """ name = "" def __init__(self, value): self.value = value @classmethod def expr(cls): e = pp.Optional(pp.Literal("-"), default=True) e += pp.Literal(cls.name).suppress() def parse(s_, loc_, toks): val = True if toks[0] == "-": val = False return cls(val) return e.setParseAction(parse) def spec(self): return "%s%s" % ("-" if not self.value else "", self.name) class IntField(_Component): """ An integer field, where values can optionally specified by name. """ names = {} max = 16 preamble = "" def __init__(self, value): self.origvalue = value self.value = self.names.get(value, value) if self.value > self.max: raise exceptions.ParseException( "Value can't exceed %s" % self.max, 0, 0 ) @classmethod def expr(cls): parts = [pp.CaselessLiteral(i) for i in cls.names.keys()] m = pp.MatchFirst(parts) spec = m | v_integer.copy() spec = spec.setParseAction(lambda x: cls(*x)) if cls.preamble: spec = pp.Literal(cls.preamble).suppress() + spec return spec def values(self, settings): return [str(self.value)] def spec(self): return "%s%s" % (self.preamble, self.origvalue) class NestedMessage(Token): """ A nested message, as an escaped string with a preamble. """ preamble = "" nest_type = None def __init__(self, value): Token.__init__(self) self.value = value try: self.parsed = self.nest_type( self.nest_type.expr().parseString( value.val, parseAll=True ) ) except pp.ParseException as v: raise exceptions.ParseException(v.msg, v.line, v.col) @classmethod def expr(cls): e = pp.Literal(cls.preamble).suppress() e = e + TokValueLiteral.expr() return e.setParseAction(lambda x: cls(*x)) def values(self, settings): return [ self.value.get_generator(settings), ] def spec(self): return "%s%s" % (self.preamble, self.value.spec()) def freeze(self, settings): f = self.parsed.freeze(settings).spec() return self.__class__(TokValueLiteral(strutils.bytes_to_escaped_str(f)))
1
11,582
I'm not sure whether we need to store the path as bytes and then decode/escape it when printing, or just store it as a unicode string (as done here).
mitmproxy-mitmproxy
py
@@ -7,7 +7,7 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( def test_hostname(host): - assert 'instance' == host.check_output('hostname -s') + assert host.check_output('hostname -s') == 'instance' def test_etc_molecule_directory(host):
1
import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_hostname(host): assert 'instance' == host.check_output('hostname -s') def test_etc_molecule_directory(host): f = host.file('/etc/molecule') assert f.is_directory assert f.user == 'root' assert f.group == 'root' assert f.mode == 0o755 def test_etc_molecule_ansible_hostname_file(host): f = host.file('/etc/molecule/instance') assert f.is_file assert f.user == 'root' assert f.group == 'root' assert f.mode == 0o644 def test_interface(host): i = host.interface('eth1').addresses # NOTE(retr0h): Contains ipv4 and ipv6 addresses. assert len(i) == 2
1
7,138
Why is this flipped? Looks unrelated and our pattern is `expected == returned`.
ansible-community-molecule
py
@@ -2,7 +2,11 @@ <%= link_to workshop do %> <span class="icon"></span> <hgroup> - <h5><%= t '.heading' %></h5> + <% if workshop.online? %> + <h5><%= t '.heading.online' %></h5> + <% else %> + <h5><%= t '.heading.in_person' %></h5> + <% end %> <h4><%= workshop.name %></h4> </hgroup> <% end %>
1
<li class="workshop"> <%= link_to workshop do %> <span class="icon"></span> <hgroup> <h5><%= t '.heading' %></h5> <h4><%= workshop.name %></h4> </hgroup> <% end %> </li>
1
6,732
Can the `h5` tags be pulled outside of the `if` block?
thoughtbot-upcase
rb
@@ -2,6 +2,8 @@ namespace Shopsys\ShopBundle\Command; +use Shopsys\ShopBundle\Component\Image\DirectoryStructureCreator as ImageDirectoryStructureCreator; +use Shopsys\ShopBundle\Component\UploadedFile\DirectoryStructureCreator as UploadedFileDirectoryStructureCreator; use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand; use Symfony\Component\Console\Input\InputInterface; use Symfony\Component\Console\Output\OutputInterface;
1
<?php namespace Shopsys\ShopBundle\Command; use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand; use Symfony\Component\Console\Input\InputInterface; use Symfony\Component\Console\Output\OutputInterface; class CreateApplicationDirectoriesCommand extends ContainerAwareCommand { protected function configure() { $this ->setName('shopsys:create-directories') ->setDescription('Create application directories for locks, docs, content, images, uploaded files, etc.'); } /** * @param \Symfony\Component\Console\Input\InputInterface $input * @param \Symfony\Component\Console\Output\OutputInterface $output */ protected function execute(InputInterface $input, OutputInterface $output) { $this->createMiscellaneousDirectories($output); $this->createImageDirectories($output); $this->createUploadedFileDirectories($output); } private function createMiscellaneousDirectories(OutputInterface $output) { $rootDirectory = $this->getContainer()->getParameter('shopsys.root_dir'); $webDirectory = $this->getContainer()->getParameter('shopsys.web_dir'); $directories = [ $rootDirectory . '/build/stats', $rootDirectory . '/docs/generated', $rootDirectory . '/var/cache', $rootDirectory . '/var/lock', $rootDirectory . '/var/logs', $rootDirectory . '/var/errorPages', $webDirectory . '/assets/admin/styles', $webDirectory . '/assets/frontend/styles', $webDirectory . '/assets/scripts', $webDirectory . '/content/feeds', $webDirectory . '/content/sitemaps', $webDirectory . '/content/wysiwyg', ]; $filesystem = $this->getContainer()->get('filesystem'); /* @var $filesystem \Symfony\Component\Filesystem\Filesystem */ $filesystem->mkdir($directories); $output->writeln('<fg=green>Miscellaneous application directories were successfully created.</fg=green>'); } /** * @param \Symfony\Component\Console\Output\OutputInterface $output */ private function createImageDirectories(OutputInterface $output) { $imageDirectoryStructureCreator = $this->getContainer() ->get('shopsys.shop.component.image.directory_structure_creator'); /* @var $imageDirectoryStructureCreator \Shopsys\ShopBundle\Component\Image\DirectoryStructureCreator */ $imageDirectoryStructureCreator->makeImageDirectories(); $output->writeln('<fg=green>Directories for images were successfully created.</fg=green>'); } /** * @param \Symfony\Component\Console\Output\OutputInterface $output */ private function createUploadedFileDirectories(OutputInterface $output) { $uploadedFileDirectoryStructureCreator = $this->getContainer() ->get('shopsys.shop.component.uploaded_file.directory_structure_creator'); /* @var $uploadedFileDirectoryStructureCreator \Shopsys\ShopBundle\Component\UploadedFile\DirectoryStructureCreator */ $uploadedFileDirectoryStructureCreator->makeUploadedFileDirectories(); $output->writeln('<fg=green>Directories for UploadedFile entities were successfully created.</fg=green>'); } }
1
8,751
From @PetrHeinz's review: this should also be aliased (`ImageDirectoryStructureCreator`).
shopsys-shopsys
php
@@ -170,7 +170,7 @@ func (s *ClusterScope) AdditionalTags() infrav1.Tags { s.AWSCluster.Spec.AdditionalTags = infrav1.Tags{} } - return s.AWSCluster.Spec.AdditionalTags + return infrav1.Tags(s.AWSCluster.Spec.AdditionalTags).DeepCopy() } // APIServerPort returns the APIServerPort to use when creating the load balancer.
1
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scope import ( "context" "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/go-logr/logr" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/klogr" infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha2" "sigs.k8s.io/cluster-api-provider-aws/pkg/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) // ClusterScopeParams defines the input parameters used to create a new Scope. type ClusterScopeParams struct { AWSClients Client client.Client Logger logr.Logger Cluster *clusterv1.Cluster AWSCluster *infrav1.AWSCluster } // NewClusterScope creates a new Scope from the supplied parameters. // This is meant to be called for each reconcile iteration. func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { if params.Cluster == nil { return nil, errors.New("failed to generate new scope from nil Cluster") } if params.AWSCluster == nil { return nil, errors.New("failed to generate new scope from nil AWSCluster") } if params.Logger == nil { params.Logger = klogr.New() } session, err := sessionForRegion(params.AWSCluster.Spec.Region) if err != nil { return nil, errors.Errorf("failed to create aws session: %v", err) } if params.AWSClients.EC2 == nil { ec2Client := ec2.New(session) ec2Client.Handlers.Complete.PushBack(recordAWSPermissionsIssue(params.AWSCluster)) params.AWSClients.EC2 = ec2Client } if params.AWSClients.ELB == nil { elbClient := elb.New(session) elbClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(params.AWSCluster)) params.AWSClients.ELB = elbClient } helper, err := patch.NewHelper(params.AWSCluster, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } return &ClusterScope{ Logger: params.Logger, client: params.Client, AWSClients: params.AWSClients, Cluster: params.Cluster, AWSCluster: params.AWSCluster, patchHelper: helper, }, nil } func recordAWSPermissionsIssue(target runtime.Object) func(r *request.Request) { return func(r *request.Request) { if awsErr, ok := r.Error.(awserr.Error); ok { switch awsErr.Code() { case "AuthFailure", "UnauthorizedOperation": record.Warnf(target, awsErr.Code(), "Operation %s failed with a credentials or permission issue", r.Operation.Name) } } } } // ClusterScope defines the basic context for an actuator to operate upon. type ClusterScope struct { logr.Logger client client.Client patchHelper *patch.Helper AWSClients Cluster *clusterv1.Cluster AWSCluster *infrav1.AWSCluster } // Network returns the cluster network object. func (s *ClusterScope) Network() *infrav1.Network { return &s.AWSCluster.Status.Network } // VPC returns the cluster VPC. func (s *ClusterScope) VPC() *infrav1.VPCSpec { return &s.AWSCluster.Spec.NetworkSpec.VPC } // Subnets returns the cluster subnets. 
func (s *ClusterScope) Subnets() infrav1.Subnets { return s.AWSCluster.Spec.NetworkSpec.Subnets } // SecurityGroups returns the cluster security groups as a map, it creates the map if empty. func (s *ClusterScope) SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup { return s.AWSCluster.Status.Network.SecurityGroups } // Name returns the cluster name. func (s *ClusterScope) Name() string { return s.Cluster.Name } // Namespace returns the cluster namespace. func (s *ClusterScope) Namespace() string { return s.Cluster.Namespace } // Region returns the cluster region. func (s *ClusterScope) Region() string { return s.AWSCluster.Spec.Region } // ControlPlaneConfigMapName returns the name of the ConfigMap used to // coordinate the bootstrapping of control plane nodes. func (s *ClusterScope) ControlPlaneConfigMapName() string { return fmt.Sprintf("%s-controlplane", s.Cluster.UID) } // ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName. func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption { return client.MatchingLabels(map[string]string{ clusterv1.MachineClusterLabelName: s.Cluster.Name, }) } // Close closes the current scope persisting the cluster configuration and status. func (s *ClusterScope) Close() error { return s.patchHelper.Patch(context.TODO(), s.AWSCluster) } // AdditionalTags returns AdditionalTags from the scope's AWSCluster. The returned value will never be nil. func (s *ClusterScope) AdditionalTags() infrav1.Tags { if s.AWSCluster.Spec.AdditionalTags == nil { s.AWSCluster.Spec.AdditionalTags = infrav1.Tags{} } return s.AWSCluster.Spec.AdditionalTags } // APIServerPort returns the APIServerPort to use when creating the load balancer. func (s *ClusterScope) APIServerPort() int64 { if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { return int64(*s.Cluster.Spec.ClusterNetwork.APIServerPort) } return 6443 }
1
11,302
Isn't AdditionalTags already of the `Tags` type? If not, we should make it so, provided it's not a breaking change.
kubernetes-sigs-cluster-api-provider-aws
go
@@ -1,4 +1,5 @@ -// Copyright (c) Microsoft. All rights reserved. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Client.Parallel { using System;
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Client.Parallel { using System; using System.Collections.Generic; using System.Threading.Tasks; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine; /// <summary> /// ParallelProxyDiscoveryManager that manages parallel discovery /// </summary> internal class ParallelProxyDiscoveryManager : ParallelOperationManager<IProxyDiscoveryManager>, IParallelProxyDiscoveryManager { #region DiscoverySpecificData private int discoveryCompletedClients = 0; private DiscoveryCriteria actualDiscoveryCriteria; private IEnumerator<string> sourceEnumerator; private Task lastParallelDiscoveryCleanUpTask = null; private IDictionary<IProxyDiscoveryManager, ITestDiscoveryEventsHandler> concurrentManagerHandlerMap; private ITestDiscoveryEventsHandler currentDiscoveryEventsHandler; private ParallelDiscoveryDataAggregator currentDiscoveryDataAggregator; #endregion #region Concurrency Keeper Objects /// <summary> /// LockObject to update discovery status in parallel /// </summary> private object discoveryStatusLockObject = new object(); #endregion public ParallelProxyDiscoveryManager(Func<IProxyDiscoveryManager> actualProxyManagerCreator, int parallelLevel, bool sharedHosts) : base(actualProxyManagerCreator, parallelLevel, sharedHosts) { } #region IProxyDiscoveryManager /// <inheritdoc/> public void Initialize() { this.DoActionOnAllManagers((proxyManager) => proxyManager.Initialize(), doActionsInParallel: true); } /// <inheritdoc/> void IProxyDiscoveryManager.DiscoverTests(DiscoveryCriteria discoveryCriteria, ITestDiscoveryEventsHandler eventHandler) { this.actualDiscoveryCriteria = discoveryCriteria; // Set the enumerator for parallel yielding of sources // Whenever a concurrent executor becomes free, it picks up the next source using this enumerator this.sourceEnumerator = discoveryCriteria.Sources.GetEnumerator(); this.DiscoverTestsPrivate(eventHandler); } /// <inheritdoc/> public void Abort() { this.DoActionOnAllManagers((proxyManager) => proxyManager.Abort(), doActionsInParallel: true); } /// <inheritdoc/> public void Close() { this.DoActionOnAllManagers(proxyManager => proxyManager.Close(), doActionsInParallel: true); } #endregion #region IParallelProxyDiscoveryManager methods /// <inheritdoc/> public bool HandlePartialDiscoveryComplete(IProxyDiscoveryManager proxyDiscoveryManager, long totalTests, IEnumerable<TestCase> lastChunk, bool isAborted) { var allDiscoverersCompleted = false; if (!this.SharedHosts) { this.concurrentManagerHandlerMap.Remove(proxyDiscoveryManager); proxyDiscoveryManager.Close(); proxyDiscoveryManager = this.CreateNewConcurrentManager(); var parallelEventsHandler = new ParallelDiscoveryEventsHandler( proxyDiscoveryManager, this.currentDiscoveryEventsHandler, this, this.currentDiscoveryDataAggregator); this.concurrentManagerHandlerMap.Add(proxyDiscoveryManager, parallelEventsHandler); } // In Case of Cancel or Abort, no need to trigger discovery for rest of the data // If there are no more sources/testcases, a parallel executor is truly done with discovery if (isAborted || !this.DiscoverTestsOnConcurrentManager(proxyDiscoveryManager)) { lock (this.discoveryStatusLockObject) { // Each concurrent Executor calls this method // So, we need to keep track of total discoverycomplete calls this.discoveryCompletedClients++; allDiscoverersCompleted = 
this.discoveryCompletedClients == this.concurrentManagerInstances.Length; } // verify that all executors are done with the discovery and there are no more sources/testcases to execute if (allDiscoverersCompleted) { // Reset enumerators this.sourceEnumerator = null; this.currentDiscoveryDataAggregator = null; this.currentDiscoveryEventsHandler = null; // Dispose concurrent executors // Do not do the cleanuptask in the current thread as we will unncessarily add to discovery time this.lastParallelDiscoveryCleanUpTask = Task.Run(() => { this.UpdateParallelLevel(0); }); } } return allDiscoverersCompleted; } #endregion #region ParallelOperationManager Methods /// <summary> /// Closes the instance of the IProxyDiscoveryManager Instance /// </summary> /// <param name="managerInstance"></param> protected override void DisposeInstance(IProxyDiscoveryManager managerInstance) { if (managerInstance != null) { try { managerInstance.Close(); } catch (Exception) { // ignore any exceptions } } } #endregion private void DiscoverTestsPrivate(ITestDiscoveryEventsHandler discoveryEventsHandler) { this.currentDiscoveryEventsHandler = discoveryEventsHandler; // Cleanup Task for cleaning up the parallel executors except for the default one // We do not do this in Sync so that this task does not add up to discovery time if (this.lastParallelDiscoveryCleanUpTask != null) { try { this.lastParallelDiscoveryCleanUpTask.Wait(); } catch (Exception ex) { // if there is an exception disposing off concurrent hosts ignore it if (EqtTrace.IsWarningEnabled) { EqtTrace.Warning("ParallelProxyDiscoveryManager: Exception while invoking an action on DiscoveryManager: {0}", ex); } } this.lastParallelDiscoveryCleanUpTask = null; } // Reset the discoverycomplete data this.discoveryCompletedClients = 0; // One data aggregator per parallel discovery this.currentDiscoveryDataAggregator = new ParallelDiscoveryDataAggregator(); this.concurrentManagerHandlerMap = new Dictionary<IProxyDiscoveryManager, ITestDiscoveryEventsHandler>(); for (int i = 0; i < this.concurrentManagerInstances.Length; i++) { var concurrentManager = this.concurrentManagerInstances[i]; var parallelEventsHandler = new ParallelDiscoveryEventsHandler( concurrentManager, discoveryEventsHandler, this, this.currentDiscoveryDataAggregator); this.concurrentManagerHandlerMap.Add(concurrentManager, parallelEventsHandler); Task.Run(() => this.DiscoverTestsOnConcurrentManager(concurrentManager)); } } /// <summary> /// Triggers the discovery for the next data object on the concurrent discoverer /// Each concurrent discoverer calls this method, once its completed working on previous data /// </summary> /// <param name="ProxyDiscoveryManager">Proxy discovery manager instance.</param> /// <returns>True, if discovery triggered</returns> private bool DiscoverTestsOnConcurrentManager(IProxyDiscoveryManager proxyDiscoveryManager) { DiscoveryCriteria discoveryCriteria = null; string nextSource = null; if (this.TryFetchNextSource(this.sourceEnumerator, out nextSource)) { EqtTrace.Info("ProxyParallelDiscoveryManager: Triggering test discovery for next source: {0}", nextSource); discoveryCriteria = new DiscoveryCriteria(new List<string>() { nextSource }, this.actualDiscoveryCriteria.FrequencyOfDiscoveredTestsEvent, this.actualDiscoveryCriteria.DiscoveredTestEventTimeout, this.actualDiscoveryCriteria.RunSettings); } if (discoveryCriteria != null) { proxyDiscoveryManager.DiscoverTests(discoveryCriteria, this.concurrentManagerHandlerMap[proxyDiscoveryManager]); return true; } return false; } } }
1
11,389
Add blank line below license header.
microsoft-vstest
.cs
@@ -24,6 +24,7 @@ func init() { initRTC() initUARTClock() initI2CClock() + initPWMClocks() // connect to UART machine.UART0.Configure(machine.UARTConfig{})
1
// +build sam,atsamd21g18a package runtime import ( "device/arm" "device/sam" "machine" "unsafe" ) type timeUnit int64 //go:export Reset_Handler func main() { preinit() initAll() callMain() abort() } func init() { initClocks() initRTC() initUARTClock() initI2CClock() // connect to UART machine.UART0.Configure(machine.UARTConfig{}) } func putchar(c byte) { machine.UART0.WriteByte(c) } func initClocks() { // Set 1 Flash Wait State for 48MHz, required for 3.3V operation according to SAMD21 Datasheet sam.NVMCTRL.CTRLB |= (sam.NVMCTRL_CTRLB_RWS_HALF << sam.NVMCTRL_CTRLB_RWS_Pos) // Turn on the digital interface clock sam.PM.APBAMASK |= sam.PM_APBAMASK_GCLK_ // turn off RTC sam.PM.APBAMASK &^= sam.PM_APBAMASK_RTC_ // Enable OSC32K clock (Internal 32.768Hz oscillator). // This requires registers that are not included in the SVD file. // This is from samd21g18a.h and nvmctrl.h: // // #define NVMCTRL_OTP4 0x00806020 // // #define SYSCTRL_FUSES_OSC32K_CAL_ADDR (NVMCTRL_OTP4 + 4) // #define SYSCTRL_FUSES_OSC32K_CAL_Pos 6 /** (NVMCTRL_OTP4) OSC32K Calibration */ // #define SYSCTRL_FUSES_OSC32K_CAL_Msk (0x7Fu << SYSCTRL_FUSES_OSC32K_CAL_Pos) // #define SYSCTRL_FUSES_OSC32K_CAL(value) ((SYSCTRL_FUSES_OSC32K_CAL_Msk & ((value) << SYSCTRL_FUSES_OSC32K_CAL_Pos))) // u32_t fuse = *(u32_t *)FUSES_OSC32K_CAL_ADDR; // u32_t calib = (fuse & FUSES_OSC32K_CAL_Msk) >> FUSES_OSC32K_CAL_Pos; fuse := *(*uint32)(unsafe.Pointer(uintptr(0x00806020) + 4)) calib := (fuse & uint32(0x7f<<6)) >> 6 // SYSCTRL_OSC32K_CALIB(calib) | // SYSCTRL_OSC32K_STARTUP(0x6u) | // SYSCTRL_OSC32K_EN32K | SYSCTRL_OSC32K_ENABLE; sam.SYSCTRL.OSC32K = sam.RegValue((calib << sam.SYSCTRL_OSC32K_CALIB_Pos) | (0x6 << sam.SYSCTRL_OSC32K_STARTUP_Pos) | sam.SYSCTRL_OSC32K_EN32K | sam.SYSCTRL_OSC32K_EN1K | sam.SYSCTRL_OSC32K_ENABLE) // Wait for oscillator stabilization for (sam.SYSCTRL.PCLKSR & sam.SYSCTRL_PCLKSR_OSC32KRDY) == 0 { } // Software reset the module to ensure it is re-initialized correctly sam.GCLK.CTRL = sam.GCLK_CTRL_SWRST // Wait for reset to complete for (sam.GCLK.CTRL&sam.GCLK_CTRL_SWRST) > 0 && (sam.GCLK.STATUS&sam.GCLK_STATUS_SYNCBUSY) > 0 { } // Put OSC32K as source of Generic Clock Generator 1 sam.GCLK.GENDIV = sam.RegValue((1 << sam.GCLK_GENDIV_ID_Pos) | (0 << sam.GCLK_GENDIV_DIV_Pos)) waitForSync() // GCLK_GENCTRL_ID(1) | GCLK_GENCTRL_SRC_OSC32K | GCLK_GENCTRL_GENEN; sam.GCLK.GENCTRL = sam.RegValue((1 << sam.GCLK_GENCTRL_ID_Pos) | (sam.GCLK_GENCTRL_SRC_OSC32K << sam.GCLK_GENCTRL_SRC_Pos) | sam.GCLK_GENCTRL_GENEN) waitForSync() // Use Generic Clock Generator 1 as source for Generic Clock Multiplexer 0 (DFLL48M reference) sam.GCLK.CLKCTRL = sam.RegValue16((sam.GCLK_CLKCTRL_ID_DFLL48 << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK1 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() // Remove the OnDemand mode, Bug http://avr32.icgroup.norway.atmel.com/bugzilla/show_bug.cgi?id=9905 sam.SYSCTRL.DFLLCTRL = sam.SYSCTRL_DFLLCTRL_ENABLE // Wait for ready for (sam.SYSCTRL.PCLKSR & sam.SYSCTRL_PCLKSR_DFLLRDY) == 0 { } // Handle DFLL calibration based on info learned from Arduino SAMD implementation, // using value stored in fuse. 
// #define SYSCTRL_FUSES_DFLL48M_COARSE_CAL_ADDR (NVMCTRL_OTP4 + 4) // #define SYSCTRL_FUSES_DFLL48M_COARSE_CAL_Pos 26 /**< \brief (NVMCTRL_OTP4) DFLL48M Coarse Calibration */ // #define SYSCTRL_FUSES_DFLL48M_COARSE_CAL_Msk (0x3Fu << SYSCTRL_FUSES_DFLL48M_COARSE_CAL_Pos) // #define SYSCTRL_FUSES_DFLL48M_COARSE_CAL(value) ((SYSCTRL_FUSES_DFLL48M_COARSE_CAL_Msk & ((value) << SYSCTRL_FUSES_DFLL48M_COARSE_CAL_Pos))) coarse := (fuse >> 26) & 0x3F if coarse == 0x3f { coarse = 0x1f } sam.SYSCTRL.DFLLVAL |= sam.RegValue(coarse << sam.SYSCTRL_DFLLVAL_COARSE_Pos) sam.SYSCTRL.DFLLVAL |= (0x1ff << sam.SYSCTRL_DFLLVAL_FINE_Pos) // Write full configuration to DFLL control register // SYSCTRL_DFLLMUL_CSTEP( 0x1f / 4 ) | // Coarse step is 31, half of the max value // SYSCTRL_DFLLMUL_FSTEP( 10 ) | // SYSCTRL_DFLLMUL_MUL( (48000) ) ; sam.SYSCTRL.DFLLMUL = sam.RegValue(((31 / 4) << sam.SYSCTRL_DFLLMUL_CSTEP_Pos) | (10 << sam.SYSCTRL_DFLLMUL_FSTEP_Pos) | (48000 << sam.SYSCTRL_DFLLMUL_MUL_Pos)) // disable DFLL sam.SYSCTRL.DFLLCTRL = 0 waitForSync() sam.SYSCTRL.DFLLCTRL |= sam.SYSCTRL_DFLLCTRL_MODE | sam.SYSCTRL_DFLLCTRL_CCDIS | sam.SYSCTRL_DFLLCTRL_USBCRM | sam.SYSCTRL_DFLLCTRL_BPLCKC // Wait for ready for (sam.SYSCTRL.PCLKSR & sam.SYSCTRL_PCLKSR_DFLLRDY) == 0 { } // Re-enable the DFLL sam.SYSCTRL.DFLLCTRL |= sam.SYSCTRL_DFLLCTRL_ENABLE // Wait for ready for (sam.SYSCTRL.PCLKSR & sam.SYSCTRL_PCLKSR_DFLLRDY) == 0 { } // Switch Generic Clock Generator 0 to DFLL48M. CPU will run at 48MHz. sam.GCLK.GENDIV = sam.RegValue((0 << sam.GCLK_GENDIV_ID_Pos) | (0 << sam.GCLK_GENDIV_DIV_Pos)) waitForSync() sam.GCLK.GENCTRL = sam.RegValue((0 << sam.GCLK_GENCTRL_ID_Pos) | (sam.GCLK_GENCTRL_SRC_DFLL48M << sam.GCLK_GENCTRL_SRC_Pos) | sam.GCLK_GENCTRL_IDC | sam.GCLK_GENCTRL_GENEN) waitForSync() // Modify PRESCaler value of OSC8M to have 8MHz sam.SYSCTRL.OSC8M |= (sam.SYSCTRL_OSC8M_PRESC_0 << sam.SYSCTRL_OSC8M_PRESC_Pos) sam.SYSCTRL.OSC8M &^= (1 << sam.SYSCTRL_OSC8M_ONDEMAND_Pos) // Wait for oscillator stabilization for (sam.SYSCTRL.PCLKSR & sam.SYSCTRL_PCLKSR_OSC8MRDY) == 0 { } // Use OSC8M as source for Generic Clock Generator 3 sam.GCLK.GENDIV = sam.RegValue((3 << sam.GCLK_GENDIV_ID_Pos)) waitForSync() sam.GCLK.GENCTRL = sam.RegValue((3 << sam.GCLK_GENCTRL_ID_Pos) | (sam.GCLK_GENCTRL_SRC_OSC8M << sam.GCLK_GENCTRL_SRC_Pos) | sam.GCLK_GENCTRL_GENEN) waitForSync() // Use OSC32K as source for Generic Clock Generator 2 // OSC32K/1 -> GCLK2 at 32KHz sam.GCLK.GENDIV = sam.RegValue(2 << sam.GCLK_GENDIV_ID_Pos) waitForSync() sam.GCLK.GENCTRL = sam.RegValue((2 << sam.GCLK_GENCTRL_ID_Pos) | (sam.GCLK_GENCTRL_SRC_OSC32K << sam.GCLK_GENCTRL_SRC_Pos) | sam.GCLK_GENCTRL_GENEN) waitForSync() // Use GCLK2 for RTC sam.GCLK.CLKCTRL = sam.RegValue16((sam.GCLK_CLKCTRL_ID_RTC << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK2 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() // Set the CPU, APBA, B, and C dividers sam.PM.CPUSEL = sam.PM_CPUSEL_CPUDIV_DIV1 sam.PM.APBASEL = sam.PM_APBASEL_APBADIV_DIV1 sam.PM.APBBSEL = sam.PM_APBBSEL_APBBDIV_DIV1 sam.PM.APBCSEL = sam.PM_APBCSEL_APBCDIV_DIV1 // Disable automatic NVM write operations sam.NVMCTRL.CTRLB |= sam.NVMCTRL_CTRLB_MANW } func initRTC() { // turn on digital interface clock sam.PM.APBAMASK |= sam.PM_APBAMASK_RTC_ // disable RTC sam.RTC_MODE0.CTRL = 0 waitForSync() // reset RTC sam.RTC_MODE0.CTRL |= sam.RTC_MODE0_CTRL_SWRST waitForSync() // set Mode0 to 32-bit counter (mode 0) with prescaler 1 and GCLK2 is 32KHz/1 sam.RTC_MODE0.CTRL = 
sam.RegValue16((sam.RTC_MODE0_CTRL_MODE_COUNT32 << sam.RTC_MODE0_CTRL_MODE_Pos) | (sam.RTC_MODE0_CTRL_PRESCALER_DIV1 << sam.RTC_MODE0_CTRL_PRESCALER_Pos) | sam.RTC_MODE0_CTRL_MATCHCLR) waitForSync() sam.RTC_MODE0.COMP0 = 0xffffffff waitForSync() // re-enable RTC sam.RTC_MODE0.CTRL |= sam.RTC_MODE0_CTRL_ENABLE waitForSync() arm.SetPriority(sam.IRQ_RTC, 0xc0) arm.EnableIRQ(sam.IRQ_RTC) } func waitForSync() { for (sam.GCLK.STATUS & sam.GCLK_STATUS_SYNCBUSY) > 0 { } } // treat all ticks params coming from runtime as being in microseconds const tickMicros = 1000 var ( timestamp timeUnit // ticks since boottime timerLastCounter uint64 ) //go:volatile type isrFlag bool var timerWakeup isrFlag const asyncScheduler = false // sleepTicks should sleep for d number of microseconds. func sleepTicks(d timeUnit) { for d != 0 { ticks() // update timestamp ticks := uint32(d) timerSleep(ticks) d -= timeUnit(ticks) } } // ticks returns number of microseconds since start. func ticks() timeUnit { // request read of count sam.RTC_MODE0.READREQ = sam.RTC_MODE0_READREQ_RREQ waitForSync() rtcCounter := uint64(sam.RTC_MODE0.COUNT) * 30 // each counter tick == 30.5us offset := (rtcCounter - timerLastCounter) // change since last measurement timerLastCounter = rtcCounter timestamp += timeUnit(offset) // TODO: not precise return timestamp } // ticks are in microseconds func timerSleep(ticks uint32) { timerWakeup = false if ticks < 30 { // have to have at least one clock count ticks = 30 } // request read of count sam.RTC_MODE0.READREQ = sam.RTC_MODE0_READREQ_RREQ waitForSync() // set compare value cnt := sam.RTC_MODE0.COUNT sam.RTC_MODE0.COMP0 = sam.RegValue(uint32(cnt) + (ticks / 30)) // each counter tick == 30.5us waitForSync() // enable IRQ for CMP0 compare sam.RTC_MODE0.INTENSET |= sam.RTC_MODE0_INTENSET_CMP0 for !timerWakeup { arm.Asm("wfi") } } //go:export RTC_IRQHandler func handleRTC() { // disable IRQ for CMP0 compare sam.RTC_MODE0.INTFLAG = sam.RTC_MODE0_INTENSET_CMP0 timerWakeup = true } func initUARTClock() { // Turn on clock to SERCOM0 for UART0 sam.PM.APBCMASK |= sam.PM_APBCMASK_SERCOM0_ // Use GCLK0 for SERCOM0 aka UART0 // GCLK_CLKCTRL_ID( clockId ) | // Generic Clock 0 (SERCOMx) // GCLK_CLKCTRL_GEN_GCLK0 | // Generic Clock Generator 0 is source // GCLK_CLKCTRL_CLKEN ; sam.GCLK.CLKCTRL = sam.RegValue16((sam.GCLK_CLKCTRL_ID_SERCOM0_CORE << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK0 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() // Turn on clock to SERCOM1 for UART1 sam.PM.APBCMASK |= sam.PM_APBCMASK_SERCOM1_ // Use GCLK0 for SERCOM1 aka UART1 // GCLK_CLKCTRL_ID( clockId ) | // Generic Clock 0 (SERCOMx) // GCLK_CLKCTRL_GEN_GCLK0 | // Generic Clock Generator 0 is source // GCLK_CLKCTRL_CLKEN ; sam.GCLK.CLKCTRL = sam.RegValue16((sam.GCLK_CLKCTRL_ID_SERCOM1_CORE << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK0 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() } func initI2CClock() { // Turn on clock to SERCOM3 for I2C0 sam.PM.APBCMASK |= sam.PM_APBCMASK_SERCOM3_ // Use GCLK0 for SERCOM3 aka I2C0 // GCLK_CLKCTRL_ID( clockId ) | // Generic Clock 0 (SERCOMx) // GCLK_CLKCTRL_GEN_GCLK0 | // Generic Clock Generator 0 is source // GCLK_CLKCTRL_CLKEN ; sam.GCLK.CLKCTRL = sam.RegValue16((sam.GCLK_CLKCTRL_ID_SERCOM3_CORE << sam.GCLK_CLKCTRL_ID_Pos) | (sam.GCLK_CLKCTRL_GEN_GCLK0 << sam.GCLK_CLKCTRL_GEN_Pos) | sam.GCLK_CLKCTRL_CLKEN) waitForSync() }
1
6,449
Perhaps this could be moved into `InitPWM`? I suspect not initializing the clocks will reduce power consumption.
tinygo-org-tinygo
go
@@ -0,0 +1,9 @@ +class Quiz < ActiveRecord::Base + validates :title, presence: true + + has_many :questions, -> { order(position: :asc) }, dependent: :destroy + + def first_question + questions.first + end +end
1
1
14,623
I believe this `order` isn't tested.
thoughtbot-upcase
rb
@@ -7,12 +7,13 @@ # include "../../_Plugin_Helper.h" # include "../Helpers/ESPEasyStatistics.h" # include "../Static/WebStaticData.h" -HELPERS_ESPEASY_MATH_H +//HELPERS_ESPEASY_MATH_H //clumsy-stefan: what's this for? + +#ifdef WEBSERVER_METRICS #ifdef ESP32 # include <esp_partition.h> #endif // ifdef ESP32 - void handle_metrics() { TXBuffer.startStream(F("text/plain"), F("*"));
1
# include "../WebServer/Metrics.h" # include "../WebServer/WebServer.h" # include "../../ESPEasy-Globals.h" # include "../Commands/Diagnostic.h" # include "../ESPEasyCore/ESPEasyNetwork.h" # include "../ESPEasyCore/ESPEasyWifi.h" # include "../../_Plugin_Helper.h" # include "../Helpers/ESPEasyStatistics.h" # include "../Static/WebStaticData.h" HELPERS_ESPEASY_MATH_H #ifdef ESP32 # include <esp_partition.h> #endif // ifdef ESP32 void handle_metrics() { TXBuffer.startStream(F("text/plain"), F("*")); //uptime addHtml(F("# HELP espeasy_uptime current device uptime in minutes\n")); addHtml(F("# TYPE espeasy_uptime counter\n")); addHtml(F("espeasy_uptime ")); addHtml(getValue(LabelType::UPTIME)); addHtml('\n'); //load addHtml(F("# HELP espeasy_load device percentage load\n")); addHtml(F("# TYPE espeasy_load gauge\n")); addHtml(F("espeasy_load ")); addHtml(getValue(LabelType::LOAD_PCT)); addHtml('\n'); //Free RAM addHtml(F("# HELP espeasy_free_ram device amount of RAM free in Bytes\n")); addHtml(F("# TYPE espeasy_free_ram gauge\n")); addHtml(F("espeasy_free_ram ")); addHtml(getValue(LabelType::FREE_MEM)); addHtml('\n'); //Free RAM addHtml(F("# HELP espeasy_free_stack device amount of Stack free in Bytes\n")); addHtml(F("# TYPE espeasy_free_stack gauge\n")); addHtml(F("espeasy_free_stack ")); addHtml(getValue(LabelType::FREE_STACK)); addHtml('\n'); //Wifi strength addHtml(F("# HELP espeasy_wifi_rssi Wifi connection Strength\n")); addHtml(F("# TYPE espeasy_wifi_rssi gauge\n")); addHtml(F("espeasy_wifi_rssi ")); addHtml(getValue(LabelType::WIFI_RSSI)); addHtml('\n'); //Wifi uptime addHtml(F("# HELP espeasy_wifi_connected Time wifi has been connected in milliseconds\n")); addHtml(F("# TYPE espeasy_wifi_connected counter\n")); addHtml(F("espeasy_wifi_connected ")); addHtml(getValue(LabelType::CONNECTED_MSEC)); addHtml('\n'); //Wifi reconnects addHtml(F("# HELP espeasy_wifi_reconnects Number of times Wifi has reconnected since boot\n")); addHtml(F("# TYPE espeasy_wifi_reconnects counter\n")); addHtml(F("espeasy_wifi_reconnects ")); addHtml(getValue(LabelType::NUMBER_RECONNECTS)); addHtml('\n'); //devices handle_metrics_devices(); TXBuffer.endStream(); } void handle_metrics_devices(){ for (taskIndex_t x = 0; validTaskIndex(x); x++) { const deviceIndex_t DeviceIndex = getDeviceIndex_from_TaskIndex(x); const bool pluginID_set = INVALID_PLUGIN_ID != Settings.TaskDeviceNumber[x]; if (pluginID_set){ LoadTaskSettings(x); if (Settings.TaskDeviceEnabled[x]){ String deviceName = ExtraTaskSettings.TaskDeviceName; addHtml(F("# HELP espeasy_device_")); addHtml(deviceName); addHtml(F(" Values from connected device\n")); addHtml(F("# TYPE espeasy_device_")); addHtml(deviceName); addHtml(F(" gauge\n")); if (validDeviceIndex(DeviceIndex)) { String customValuesString; //const bool customValues = PluginCall(PLUGIN_WEBFORM_SHOW_VALUES, &TempEvent, customValuesString); const bool customValues = 0; //TODO: handle custom values if (!customValues) { const uint8_t valueCount = getValueCountForTask(x); for (uint8_t varNr = 0; varNr < valueCount; varNr++) { if (validPluginID_fullcheck(Settings.TaskDeviceNumber[x])) { addHtml(F("espeasy_device_")); addHtml(deviceName); addHtml(F("{valueName=\"")); addHtml(ExtraTaskSettings.TaskDeviceValueNames[varNr]); addHtml(F("\"} ")); addHtml(formatUserVarNoCheck(x, varNr)); addHtml('\n'); } } } } } } } }
1
22,515
No idea why it ended up in the code. You can remove the entire line.
letscontrolit-ESPEasy
cpp
@@ -146,9 +146,10 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *secv1alpha1.C appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup) appliedToGroupNamesSet.Insert(atGroup) } + iFromPeers := n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists) rules = append(rules, controlplane.NetworkPolicyRule{ Direction: controlplane.DirectionIn, - From: *n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists), + From: iFromPeers, Services: services, Action: ingressRule.Action, Priority: int32(idx),
1
// Copyright 2020 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package networkpolicy import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/apis/controlplane" secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1" antreatypes "github.com/vmware-tanzu/antrea/pkg/controller/types" ) // addCNP receives ClusterNetworkPolicy ADD events and creates resources // which can be consumed by agents to configure corresponding rules on the Nodes. func (n *NetworkPolicyController) addCNP(obj interface{}) { defer n.heartbeat("addCNP") cnp := obj.(*secv1alpha1.ClusterNetworkPolicy) klog.Infof("Processing ClusterNetworkPolicy %s ADD event", cnp.Name) // Create an internal NetworkPolicy object corresponding to this // ClusterNetworkPolicy and enqueue task to internal NetworkPolicy Workqueue. internalNP := n.processClusterNetworkPolicy(cnp) klog.V(2).Infof("Creating new internal NetworkPolicy %s for %s", internalNP.Name, internalNP.SourceRef.ToString()) n.internalNetworkPolicyStore.Create(internalNP) key := internalNetworkPolicyKeyFunc(cnp) n.enqueueInternalNetworkPolicy(key) } // updateCNP receives ClusterNetworkPolicy UPDATE events and updates resources // which can be consumed by agents to configure corresponding rules on the Nodes. func (n *NetworkPolicyController) updateCNP(old, cur interface{}) { defer n.heartbeat("updateCNP") curCNP := cur.(*secv1alpha1.ClusterNetworkPolicy) klog.Infof("Processing ClusterNetworkPolicy %s UPDATE event", curCNP.Name) // Update an internal NetworkPolicy, corresponding to this NetworkPolicy and // enqueue task to internal NetworkPolicy Workqueue. curInternalNP := n.processClusterNetworkPolicy(curCNP) klog.V(2).Infof("Updating existing internal NetworkPolicy %s for %s", curInternalNP.Name, curInternalNP.SourceRef.ToString()) // Retrieve old secv1alpha1.NetworkPolicy object. oldCNP := old.(*secv1alpha1.ClusterNetworkPolicy) // Old and current NetworkPolicy share the same key. key := internalNetworkPolicyKeyFunc(oldCNP) // Lock access to internal NetworkPolicy store such that concurrent access // to an internal NetworkPolicy is not allowed. This will avoid the // case in which an Update to an internal NetworkPolicy object may // cause the SpanMeta member to be overridden with stale SpanMeta members // from an older internal NetworkPolicy. n.internalNetworkPolicyMutex.Lock() oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key) oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy) // Must preserve old internal NetworkPolicy Span. curInternalNP.SpanMeta = oldInternalNP.SpanMeta n.internalNetworkPolicyStore.Update(curInternalNP) // Unlock the internal NetworkPolicy store. n.internalNetworkPolicyMutex.Unlock() // Enqueue addressGroup keys to update their Node span. 
for _, rule := range curInternalNP.Rules { for _, addrGroupName := range rule.From.AddressGroups { n.enqueueAddressGroup(addrGroupName) } for _, addrGroupName := range rule.To.AddressGroups { n.enqueueAddressGroup(addrGroupName) } } n.enqueueInternalNetworkPolicy(key) for _, atg := range oldInternalNP.AppliedToGroups { // Delete the old AppliedToGroup object if it is not referenced // by any internal NetworkPolicy. n.deleteDereferencedAppliedToGroup(atg) } n.deleteDereferencedAddressGroups(oldInternalNP) } // deleteCNP receives ClusterNetworkPolicy DELETED events and deletes resources // which can be consumed by agents to delete corresponding rules on the Nodes. func (n *NetworkPolicyController) deleteCNP(old interface{}) { cnp, ok := old.(*secv1alpha1.ClusterNetworkPolicy) if !ok { tombstone, ok := old.(cache.DeletedFinalStateUnknown) if !ok { klog.Errorf("Error decoding object when deleting ClusterNetworkPolicy, invalid type: %v", old) return } cnp, ok = tombstone.Obj.(*secv1alpha1.ClusterNetworkPolicy) if !ok { klog.Errorf("Error decoding object tombstone when deleting ClusterNetworkPolicy, invalid type: %v", tombstone.Obj) return } } defer n.heartbeat("deleteCNP") klog.Infof("Processing ClusterNetworkPolicy %s DELETE event", cnp.Name) key := internalNetworkPolicyKeyFunc(cnp) oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key) oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy) klog.V(2).Infof("Deleting internal NetworkPolicy %s for %s", oldInternalNP.Name, oldInternalNP.SourceRef.ToString()) err := n.internalNetworkPolicyStore.Delete(key) if err != nil { klog.Errorf("Error deleting internal NetworkPolicy during NetworkPolicy %s delete: %v", cnp.Name, err) return } for _, atg := range oldInternalNP.AppliedToGroups { n.deleteDereferencedAppliedToGroup(atg) } n.deleteDereferencedAddressGroups(oldInternalNP) } // processClusterNetworkPolicy creates an internal NetworkPolicy instance // corresponding to the secv1alpha1.ClusterNetworkPolicy object. This method // does not commit the internal NetworkPolicy in store, instead returns an // instance to the caller wherein, it will be either stored as a new Object // in case of ADD event or modified and store the updated instance, in case // of an UPDATE event. func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *secv1alpha1.ClusterNetworkPolicy) *antreatypes.NetworkPolicy { appliedToPerRule := len(cnp.Spec.AppliedTo) == 0 // appliedToGroupNames tracks all distinct appliedToGroups referred to by the ClusterNetworkPolicy, // either in the spec section or in ingress/egress rules. // The span calculation and stale appliedToGroup cleanup logic would work seamlessly for both cases. appliedToGroupNamesSet := sets.String{} // Create AppliedToGroup for each AppliedTo present in ClusterNetworkPolicy spec. for _, at := range cnp.Spec.AppliedTo { appliedToGroupNamesSet.Insert(n.createAppliedToGroup( "", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector)) } rules := make([]controlplane.NetworkPolicyRule, 0, len(cnp.Spec.Ingress)+len(cnp.Spec.Egress)) // Compute NetworkPolicyRule for Ingress Rule. for idx, ingressRule := range cnp.Spec.Ingress { // Set default action to ALLOW to allow traffic. services, namedPortExists := toAntreaServicesForCRD(ingressRule.Ports) var appliedToGroupNamesForRule []string // Create AppliedToGroup for each AppliedTo present in the ingress rule. 
for _, at := range ingressRule.AppliedTo { atGroup := n.createAppliedToGroup("", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector) appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup) appliedToGroupNamesSet.Insert(atGroup) } rules = append(rules, controlplane.NetworkPolicyRule{ Direction: controlplane.DirectionIn, From: *n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists), Services: services, Action: ingressRule.Action, Priority: int32(idx), EnableLogging: ingressRule.EnableLogging, AppliedToGroups: appliedToGroupNamesForRule, }) } // Compute NetworkPolicyRule for Egress Rule. for idx, egressRule := range cnp.Spec.Egress { // Set default action to ALLOW to allow traffic. services, namedPortExists := toAntreaServicesForCRD(egressRule.Ports) var appliedToGroupNamesForRule []string // Create AppliedToGroup for each AppliedTo present in the ingress rule. for _, at := range egressRule.AppliedTo { atGroup := n.createAppliedToGroup("", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector) appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup) appliedToGroupNamesSet.Insert(atGroup) } rules = append(rules, controlplane.NetworkPolicyRule{ Direction: controlplane.DirectionOut, To: *n.toAntreaPeerForCRD(egressRule.To, cnp, controlplane.DirectionOut, namedPortExists), Services: services, Action: egressRule.Action, Priority: int32(idx), EnableLogging: egressRule.EnableLogging, AppliedToGroups: appliedToGroupNamesForRule, }) } tierPriority := n.getTierPriority(cnp.Spec.Tier) internalNetworkPolicy := &antreatypes.NetworkPolicy{ Name: internalNetworkPolicyKeyFunc(cnp), Generation: cnp.Generation, SourceRef: &controlplane.NetworkPolicyReference{ Type: controlplane.AntreaClusterNetworkPolicy, Name: cnp.Name, UID: cnp.UID, }, UID: cnp.UID, AppliedToGroups: appliedToGroupNamesSet.List(), Rules: rules, Priority: &cnp.Spec.Priority, TierPriority: &tierPriority, AppliedToPerRule: appliedToPerRule, } return internalNetworkPolicy }
1
31,053
"i" and "From" is a bit duplicate, and should it be singular given the method name? I mean "fromPeer" or just "from"?
antrea-io-antrea
go
@@ -32,6 +32,7 @@ public class TestOAuth2AuthorizationRequests { return OAuth2AuthorizationRequest.authorizationCode() .authorizationUri("https://example.com/login/oauth/authorize") .clientId(clientId) + .scope("openid") .redirectUri("https://example.com/authorize/oauth2/code/registration-id") .state("state") .additionalParameters(additionalParameters);
1
/* * Copyright 2002-2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.oauth2.core.endpoint; import java.util.HashMap; import java.util.Map; /** * @author Rob Winch * @since 5.1 */ public class TestOAuth2AuthorizationRequests { public static OAuth2AuthorizationRequest.Builder request() { String registrationId = "registration-id"; String clientId = "client-id"; Map<String, Object> additionalParameters = new HashMap<>(); additionalParameters.put(OAuth2ParameterNames.REGISTRATION_ID, registrationId); return OAuth2AuthorizationRequest.authorizationCode() .authorizationUri("https://example.com/login/oauth/authorize") .clientId(clientId) .redirectUri("https://example.com/authorize/oauth2/code/registration-id") .state("state") .additionalParameters(additionalParameters); } }
1
10,936
Did you verify this won't break any existing test assumptions? We probably shouldn't modify this, since we don't know what impact it has on existing tests; it may invalidate a test with different expectations of the scopes.
spring-projects-spring-security
java
@@ -43,12 +43,16 @@ func (a *defaultAuthorizer) Authorize(_ context.Context, claims *Claims, target if target.Namespace == "temporal-system" || target.Namespace == "" { return Result{Decision: DecisionAllow}, nil } - + if claims == nil { + return Result{Decision: DecisionDeny}, nil + } // Check system level permissions if claims.System == RoleAdmin || claims.System == RoleWriter { return Result{Decision: DecisionAllow}, nil } - + if claims.Namespaces == nil { + return Result{Decision: DecisionDeny}, nil + } roles, found := claims.Namespaces[target.Namespace] if !found || roles == RoleUndefined { return Result{Decision: DecisionDeny}, nil
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package authorization import "context" type defaultAuthorizer struct{} // NewDefaultAuthorizer creates a default authorizer func NewDefaultAuthorizer() Authorizer { return &defaultAuthorizer{} } func (a *defaultAuthorizer) Authorize(_ context.Context, claims *Claims, target *CallTarget) (Result, error) { // TODO: This is a temporary workaround to allow calls to system namespace and // calls with no namespace to pass through. When handling of mTLS data is added, // we should remove "temporal-system" from here. Handling of call with // no namespace will need to be performed at the API level, so that data would // be filtered based of caller's permissions to namespaces and system. if target.Namespace == "temporal-system" || target.Namespace == "" { return Result{Decision: DecisionAllow}, nil } // Check system level permissions if claims.System == RoleAdmin || claims.System == RoleWriter { return Result{Decision: DecisionAllow}, nil } roles, found := claims.Namespaces[target.Namespace] if !found || roles == RoleUndefined { return Result{Decision: DecisionDeny}, nil } return Result{Decision: DecisionAllow}, nil }
1
10,970
Technically not necessary: reading from a nil map always returns found = false, so this case is already handled.
temporalio-temporal
go
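The comment above relies on Go's nil-map read semantics; here is a minimal runnable sketch of that behavior, with the claims structure reduced to a bare map for illustration.

package main

import "fmt"

func main() {
	var namespaces map[string]string // nil: never initialized

	// Reading from a nil map is safe and behaves like reading from an empty map:
	// it returns the zero value and found == false, so an explicit nil check
	// before the lookup adds nothing.
	roles, found := namespaces["target-namespace"]
	fmt.Printf("%q %v\n", roles, found) // "" false

	// Only writing to a nil map panics; the authorizer never writes to
	// Claims.Namespaces, so the claims == nil guard is the one that matters.
}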
@@ -141,6 +141,9 @@ type StatsDriver interface { // VolumeUsageByNode returns capacity usage of all volumes and snaps for a // given node VolumeUsageByNode(nodeID string) (*api.VolumeUsageByNode, error) + // RelaxedReclaimPurge triggers the purge of RelaxedReclaim queue for a + // given node + RelaxedReclaimPurge(nodeID string) (*api.RelaxedReclaimPurge, error) } type QuiesceDriver interface {
1
package volume import ( "errors" "github.com/libopenstorage/openstorage/api" ) var ( // ErrAlreadyShutdown returned when driver is shutdown ErrAlreadyShutdown = errors.New("VolumeDriverProvider already shutdown") // ErrExit returned when driver already registered ErrExist = errors.New("Already exists") // ErrDriverNotFound returned when a driver is not registered ErrDriverNotFound = errors.New("Driver implementation not found") // ErrDriverInitializing returned when a driver is initializing ErrDriverInitializing = errors.New("Driver is initializing") // ErrEnoEnt returned when volume does not exist ErrEnoEnt = errors.New("Volume does not exist.") // ErrEnomem returned when we are out of memory ErrEnomem = errors.New("Out of memory.") // ErrEinval returned when an invalid input is provided ErrEinval = errors.New("Invalid argument") // ErrVolDetached returned when volume is in detached state ErrVolDetached = errors.New("Volume is detached") // ErrVolAttached returned when volume is in attached state ErrVolAttached = errors.New("Volume is attached") // ErrVolAttachedOnRemoteNode returned when volume is in attached on different node ErrVolAttachedOnRemoteNode = errors.New("Volume is attached on another node") // ErrVolAttachedScale returned when volume is attached and can be scaled ErrVolAttachedScale = errors.New("Volume is attached on another node." + " Increase scale factor to create more instances") // ErrVolHasSnaps returned when volume has previous snapshots ErrVolHasSnaps = errors.New("Volume has snapshots associated") // ErrNotSupported returned when the operation is not supported ErrNotSupported = errors.New("Operation not supported") // ErrVolBusy returned when volume is in busy state ErrVolBusy = errors.New("Volume is busy") // ErrAborted returned when capacityUsageInfo cannot be returned ErrAborted = errors.New("Aborted CapacityUsage request") // ErrInvalidName returned when Cloudbackup Name/request is invalid ErrInvalidName = errors.New("Invalid name for cloud backup/restore request") // ErrFsResizeFailed returned when Filesystem resize failed because of filesystem // errors ErrFsResizeFailed = errors.New("Filesystem Resize failed due to filesystem errors") // ErrNoVolumeUpdate is returned when a volume update has no changes requested ErrNoVolumeUpdate = errors.New("No change requested") ) // Constants used by the VolumeDriver const ( // APIVersion for the volume management apis APIVersion = "v1" // PluginAPIBase where the docker unix socket resides PluginAPIBase = "/run/docker/plugins/" // DriverAPIBase where the osd unix socket resides DriverAPIBase = "/var/lib/osd/driver/" // MountBase for osd mountpoints MountBase = "/var/lib/osd/mounts/" // VolumeBase for osd volumes VolumeBase = "/var/lib/osd/" ) const ( // LocationConstaint is a label that specifies data location constraint. LocationConstraint = "LocationConstraint" // LocalNode is an alias for this node - similar to localhost. LocalNode = "LocalNode" // FromTrashCan is a label that specified a volume being in the TrashCan FromTrashCan = "FromTrashCan" ) // Store defines the interface for basic volume store operations type Store interface { // Lock volume specified by volumeID. Lock(volumeID string) (interface{}, error) // Lock volume with token obtained from call to Lock. Unlock(token interface{}) error // CreateVol returns error if volume with the same ID already existe. CreateVol(vol *api.Volume) error // GetVol from volumeID. 
GetVol(volumeID string) (*api.Volume, error) // UpdateVol with vol UpdateVol(vol *api.Volume) error // DeleteVol. Returns error if volume does not exist. DeleteVol(volumeID string) error } // VolumeDriver is the main interface to be implemented by any storage driver. // Every driver must at minimum implement the ProtoDriver sub interface. type VolumeDriver interface { IODriver ProtoDriver BlockDriver Enumerator } // IODriver interfaces applicable to object store interfaces. type IODriver interface { // Read sz bytes from specified volume at specified offset. // Return number of bytes read and error. Read(volumeID string, buf []byte, sz uint64, offset int64) (int64, error) // Write sz bytes from specified volume at specified offset. // Return number of bytes written and error. Write(volumeID string, buf []byte, sz uint64, offset int64) (int64, error) // Flush writes to stable storage. // Return error. Flush(volumeID string) error } // SnapshotDriver interfaces provides snapshot capability type SnapshotDriver interface { // Snapshot create volume snapshot. // Errors ErrEnoEnt may be returned Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator, noRetry bool) (string, error) // Restore restores volume to specified snapshot. Restore(volumeID string, snapshotID string) error // SnapshotGroup takes a snapshot of a group of volumes that can be specified with either of the following // 1. group ID // 2. labels // 3. volumeIDs // deleteOnFailure indicates whether to delete the successful snaps if some of the snapshots failed SnapshotGroup(groupID string, labels map[string]string, volumeIDs []string, deleteOnFailure bool) (*api.GroupSnapCreateResponse, error) } // StatsDriver interface provides stats features type StatsDriver interface { // Stats for specified volume. // cumulative stats are /proc/diskstats style stats. // nonCumulative stats are stats for specific duration. // Errors ErrEnoEnt may be returned Stats(volumeID string, cumulative bool) (*api.Stats, error) // UsedSize returns currently used volume size. // Errors ErrEnoEnt may be returned. UsedSize(volumeID string) (uint64, error) // GetActiveRequests get active requests GetActiveRequests() (*api.ActiveRequests, error) // CapacityUsage returns both exclusive and shared usage // of a snap/volume CapacityUsage(ID string) (*api.CapacityUsageResponse, error) // VolumeUsageByNode returns capacity usage of all volumes and snaps for a // given node VolumeUsageByNode(nodeID string) (*api.VolumeUsageByNode, error) } type QuiesceDriver interface { // Freezes mounted filesystem resulting in a quiesced volume state. // Only one freeze operation may be active at any given time per volume. // Unfreezes after timeout seconds if it is non-zero. // An optional quiesceID can be passed for driver-specific use. Quiesce(volumeID string, timeoutSeconds uint64, quiesceID string) error // Unfreezes mounted filesystem if it was frozen. 
Unquiesce(volumeID string) error } // CloudBackupDriver interface provides Cloud backup features type CloudBackupDriver interface { // CloudBackupCreate uploads snapshot of a volume to the cloud CloudBackupCreate(input *api.CloudBackupCreateRequest) (*api.CloudBackupCreateResponse, error) // CloudBackupGroupCreate creates and then uploads volumegroup snapshots CloudBackupGroupCreate(input *api.CloudBackupGroupCreateRequest) (*api.CloudBackupGroupCreateResponse, error) // CloudBackupRestore downloads a cloud backup and restores it to a volume CloudBackupRestore(input *api.CloudBackupRestoreRequest) (*api.CloudBackupRestoreResponse, error) // CloudBackupEnumerate enumerates the backups for a given cluster/credential/volumeID CloudBackupEnumerate(input *api.CloudBackupEnumerateRequest) (*api.CloudBackupEnumerateResponse, error) // CloudBackupDelete deletes the specified backup in cloud CloudBackupDelete(input *api.CloudBackupDeleteRequest) error // CloudBackupDelete deletes all the backups for a given volume in cloud CloudBackupDeleteAll(input *api.CloudBackupDeleteAllRequest) error // CloudBackupStatus indicates the most recent status of backup/restores CloudBackupStatus(input *api.CloudBackupStatusRequest) (*api.CloudBackupStatusResponse, error) // CloudBackupCatalog displays listing of backup content CloudBackupCatalog(input *api.CloudBackupCatalogRequest) (*api.CloudBackupCatalogResponse, error) // CloudBackupHistory displays past backup/restore operations on a volume CloudBackupHistory(input *api.CloudBackupHistoryRequest) (*api.CloudBackupHistoryResponse, error) // CloudBackupStateChange allows a current backup state transisions(pause/resume/stop) CloudBackupStateChange(input *api.CloudBackupStateChangeRequest) error // CloudBackupSchedCreate creates a schedule to backup volume to cloud CloudBackupSchedCreate(input *api.CloudBackupSchedCreateRequest) (*api.CloudBackupSchedCreateResponse, error) // CloudBackupGroupSchedCreate creates a schedule to backup a volumegroup to cloud CloudBackupGroupSchedCreate(input *api.CloudBackupGroupSchedCreateRequest) (*api.CloudBackupSchedCreateResponse, error) // CloudBackupSchedCreate creates a schedule to backup volume to cloud CloudBackupSchedUpdate(input *api.CloudBackupSchedUpdateRequest) error // CloudBackupGroupSchedCreate creates a schedule to backup a volumegroup to cloud CloudBackupGroupSchedUpdate(input *api.CloudBackupGroupSchedUpdateRequest) error // CloudBackupSchedDelete delete a backup schedule CloudBackupSchedDelete(input *api.CloudBackupSchedDeleteRequest) error // CloudBackupSchedEnumerate enumerates the configured backup schedules in the cluster CloudBackupSchedEnumerate() (*api.CloudBackupSchedEnumerateResponse, error) // CloudBackupSize fetches the size of a cloud backup CloudBackupSize(input *api.SdkCloudBackupSizeRequest) (*api.SdkCloudBackupSizeResponse, error) } // CloudMigrateDriver interface provides Cloud migration features type CloudMigrateDriver interface { // CloudMigrateStart starts a migrate operation CloudMigrateStart(request *api.CloudMigrateStartRequest) (*api.CloudMigrateStartResponse, error) // CloudMigrateCancel cancels a migrate operation CloudMigrateCancel(request *api.CloudMigrateCancelRequest) error // CloudMigrateStatus returns status for the migration operations CloudMigrateStatus(request *api.CloudMigrateStatusRequest) (*api.CloudMigrateStatusResponse, error) } // FilesystemTrimDriver interface exposes APIs to manage filesystem trim // operation on a volume type FilesystemTrimDriver interface { // 
FilesystemTrimStart starts a filesystem trim background operation on a // specified volume FilesystemTrimStart(request *api.SdkFilesystemTrimStartRequest) (*api.SdkFilesystemTrimStartResponse, error) // FilesystemTrimStatus returns the status of a filesystem trim // background operation on a specified volume, if any FilesystemTrimStatus(request *api.SdkFilesystemTrimStatusRequest) (*api.SdkFilesystemTrimStatusResponse, error) // FilesystemTrimStop stops a filesystem trim background operation on // a specified volume, if any FilesystemTrimStop(request *api.SdkFilesystemTrimStopRequest) (*api.SdkFilesystemTrimStopResponse, error) } // FilesystemCheckDriver interface exposes APIs to manage filesystem check // operation on a volume type FilesystemCheckDriver interface { // FilesystemCheckStart starts a filesystem check background operation // on a specified volume FilesystemCheckStart(request *api.SdkFilesystemCheckStartRequest) (*api.SdkFilesystemCheckStartResponse, error) // FilesystemCheckStatus returns the status of a filesystem check // background operation on the filesystem of a specified volume, if any. FilesystemCheckStatus(request *api.SdkFilesystemCheckStatusRequest) (*api.SdkFilesystemCheckStatusResponse, error) // FilesystemCheckStop stops the filesystem check background operation on // the filesystem of a specified volume, if any. FilesystemCheckStop(request *api.SdkFilesystemCheckStopRequest) (*api.SdkFilesystemCheckStopResponse, error) } // ProtoDriver must be implemented by all volume drivers. It specifies the // most basic functionality, such as creating and deleting volumes. type ProtoDriver interface { SnapshotDriver StatsDriver QuiesceDriver CredsDriver CloudBackupDriver CloudMigrateDriver FilesystemTrimDriver FilesystemCheckDriver // Name returns the name of the driver. Name() string // Type of this driver Type() api.DriverType // Version information of the driver Version() (*api.StorageVersion, error) // Create a new Vol for the specific volume spec. // It returns a system generated VolumeID that uniquely identifies the volume Create(locator *api.VolumeLocator, Source *api.Source, spec *api.VolumeSpec) (string, error) // Delete volume. // Errors ErrEnoEnt, ErrVolHasSnaps may be returned. Delete(volumeID string) error // Mount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. Mount(volumeID string, mountPath string, options map[string]string) error // MountedAt return volume mounted at specified mountpath. MountedAt(mountPath string) string // Unmount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. Unmount(volumeID string, mountPath string, options map[string]string) error // Update not all fields of the spec are supported, ErrNotSupported will be thrown for unsupported // updates. Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. Status() [][2]string // Shutdown and cleanup. Shutdown() // DU specified volume and potentially the subfolder if provided. Catalog(volumeid, subfolder string, depth string) (api.CatalogResponse, error) // Does a Filesystem Trim operation to free unused space to block device(block discard) VolService(volumeID string, vsreq *api.VolumeServiceRequest) (*api.VolumeServiceResponse, error) } // Enumerator provides a set of interfaces to get details on a set of volumes. type Enumerator interface { // Inspect specified volumes. 
// Returns slice of volumes that were found. Inspect(volumeIDs []string) ([]*api.Volume, error) // Enumerate volumes that map to the volumeLocator. Locator fields may be regexp. // If locator fields are left blank, this will return all volumes. Enumerate(locator *api.VolumeLocator, labels map[string]string) ([]*api.Volume, error) // Enumerate snaps for specified volumes SnapEnumerate(volID []string, snapLabels map[string]string) ([]*api.Volume, error) } // StoreEnumerator combines Store and Enumerator capabilities type StoreEnumerator interface { Store Enumerator } // BlockDriver needs to be implemented by block volume drivers. Filesystem volume // drivers can ignore this interface and include the builtin DefaultBlockDriver. type BlockDriver interface { // Attach map device to the host. // On success the devicePath specifies location where the device is exported // Errors ErrEnoEnt, ErrVolAttached may be returned. Attach(volumeID string, attachOptions map[string]string) (string, error) // Detach device from the host. // Errors ErrEnoEnt, ErrVolDetached may be returned. Detach(volumeID string, options map[string]string) error } // CredsDriver provides methods to handle credentials type CredsDriver interface { // CredsCreate creates credential for a given cloud provider CredsCreate(params map[string]string) (string, error) // CredsUpdate updates credential for an already configured credential CredsUpdate(name string, params map[string]string) error // CredsEnumerate lists the configured credentials in the cluster CredsEnumerate() (map[string]interface{}, error) // CredsDelete deletes the credential associated credUUID CredsDelete(credUUID string) error // CredsValidate validates the credential associated credUUID CredsValidate(credUUID string) error // CredsDeleteReferences delets any with the creds CredsDeleteReferences(credUUID string) error } // VolumeDriverProvider provides VolumeDrivers. type VolumeDriverProvider interface { // Get gets the VolumeDriver for the given name. // If a VolumeDriver was not created for the given name, the error ErrDriverNotFound is returned. Get(name string) (VolumeDriver, error) // Shutdown shuts down all volume drivers. Shutdown() error } // VolumeDriverRegistry registers VolumeDrivers. type VolumeDriverRegistry interface { VolumeDriverProvider // New creates the VolumeDriver for the given name. // If a VolumeDriver was already created for the given name, the error ErrExist is returned. Register(name string, params map[string]string) error // Add inserts a new VolumeDriver provider with a well known name. Add(name string, init func(map[string]string) (VolumeDriver, error)) error // Removes driver from registry. Does nothing if driver name does not exist. Remove(name string) } // NewVolumeDriverRegistry constructs a new VolumeDriverRegistry. func NewVolumeDriverRegistry(nameToInitFunc map[string]func(map[string]string) (VolumeDriver, error)) VolumeDriverRegistry { return newVolumeDriverRegistry(nameToInitFunc) }
1
8,876
This can return a bool: `RelaxedReclaimPurge(nodeID string) (bool, error)`.
libopenstorage-openstorage
go
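The review comment above suggests that the purge call report whether anything was actually purged, alongside the error. Below is a minimal, hypothetical Go sketch of that shape; `ReclaimDriver`, `fakeDriver`, and the `pending` map are illustrative stand-ins, not the actual openstorage types around `RelaxedReclaimPurge`.

```go
package main

import (
	"errors"
	"fmt"
)

// ReclaimDriver is a hypothetical stand-in for the interface the review
// comment refers to; the (bool, error) pair lets callers distinguish
// "nothing to purge" from "purge attempted and failed".
type ReclaimDriver interface {
	// RelaxedReclaimPurge reports whether any reclaim entries were purged
	// for the given node, plus any error encountered.
	RelaxedReclaimPurge(nodeID string) (bool, error)
}

type fakeDriver struct{ pending map[string]int }

func (d *fakeDriver) RelaxedReclaimPurge(nodeID string) (bool, error) {
	n, ok := d.pending[nodeID]
	if !ok {
		return false, errors.New("node not found")
	}
	delete(d.pending, nodeID)
	return n > 0, nil // true only if something was actually purged
}

func main() {
	var d ReclaimDriver = &fakeDriver{pending: map[string]int{"node-1": 3}}
	purged, err := d.RelaxedReclaimPurge("node-1")
	fmt.Println(purged, err) // true <nil>
}
```

With the extra bool, callers can tell an empty purge apart from a failed one without inspecting error strings.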
@@ -60,7 +60,7 @@ public class GeneralStateReferenceTestTools { final String eips = System.getProperty( "test.ethereum.state.eips", - "Frontier,Homestead,EIP150,EIP158,Byzantium,Constantinople,ConstantinopleFix,Istanbul,Berlin"); + "Frontier,Homestead,EIP150,EIP158,Byzantium,Constantinople,ConstantinopleFix,Istanbul,Berlin,London"); EIPS_TO_RUN = Arrays.asList(eips.split(",")); }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.vm; import static org.assertj.core.api.Assertions.assertThat; import org.hyperledger.besu.ethereum.core.Account; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.Hash; import org.hyperledger.besu.ethereum.core.Log; import org.hyperledger.besu.ethereum.core.MutableWorldState; import org.hyperledger.besu.ethereum.core.Transaction; import org.hyperledger.besu.ethereum.core.WorldState; import org.hyperledger.besu.ethereum.core.WorldUpdater; import org.hyperledger.besu.ethereum.mainnet.MainnetTransactionProcessor; import org.hyperledger.besu.ethereum.mainnet.TransactionValidationParams; import org.hyperledger.besu.ethereum.processing.TransactionProcessingResult; import org.hyperledger.besu.ethereum.referencetests.GeneralStateTestCaseEipSpec; import org.hyperledger.besu.ethereum.referencetests.GeneralStateTestCaseSpec; import org.hyperledger.besu.ethereum.referencetests.ReferenceTestBlockchain; import org.hyperledger.besu.ethereum.referencetests.ReferenceTestProtocolSchedules; import org.hyperledger.besu.ethereum.rlp.RLP; import org.hyperledger.besu.ethereum.worldstate.DefaultMutableWorldState; import org.hyperledger.besu.testutil.JsonTestParameters; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; public class GeneralStateReferenceTestTools { private static final ReferenceTestProtocolSchedules REFERENCE_TEST_PROTOCOL_SCHEDULES = ReferenceTestProtocolSchedules.create(); private static final List<String> SPECS_PRIOR_TO_DELETING_EMPTY_ACCOUNTS = Arrays.asList("Frontier", "Homestead", "EIP150"); private static MainnetTransactionProcessor transactionProcessor(final String name) { return REFERENCE_TEST_PROTOCOL_SCHEDULES .getByName(name) .getByBlockNumber(0) .getTransactionProcessor(); } private static final List<String> EIPS_TO_RUN; static { final String eips = System.getProperty( "test.ethereum.state.eips", "Frontier,Homestead,EIP150,EIP158,Byzantium,Constantinople,ConstantinopleFix,Istanbul,Berlin"); EIPS_TO_RUN = Arrays.asList(eips.split(",")); } private static final JsonTestParameters<?, ?> params = JsonTestParameters.create(GeneralStateTestCaseSpec.class, GeneralStateTestCaseEipSpec.class) .generator( (testName, stateSpec, collector) -> { final String prefix = testName + "-"; for (final Map.Entry<String, List<GeneralStateTestCaseEipSpec>> entry : stateSpec.finalStateSpecs().entrySet()) { final String eip = entry.getKey(); final boolean runTest = EIPS_TO_RUN.contains(eip); final List<GeneralStateTestCaseEipSpec> eipSpecs = entry.getValue(); if (eipSpecs.size() == 1) { collector.add(prefix + eip, eipSpecs.get(0), runTest); } else { for (int i = 0; i < eipSpecs.size(); i++) { collector.add(prefix + eip + '[' + i + ']', eipSpecs.get(i), runTest); } } } }); static { if (EIPS_TO_RUN.isEmpty()) { params.ignoreAll(); } // Known incorrect test. 
params.ignore( "RevertPrecompiledTouch(_storage)?-(EIP158|Byzantium|Constantinople|ConstantinopleFix)"); // Gas integer value is too large to construct a valid transaction. params.ignore("OverflowGasRequire"); // Consumes a huge amount of memory params.ignore("static_Call1MB1024Calldepth-\\w"); params.ignore("ShanghaiLove_.*"); // Don't do time consuming tests params.ignore("CALLBlake2f_MaxRounds.*"); } public static Collection<Object[]> generateTestParametersForConfig(final String[] filePath) { return params.generate(filePath); } public static void executeTest(final GeneralStateTestCaseEipSpec spec) { final BlockHeader blockHeader = spec.getBlockHeader(); final WorldState initialWorldState = spec.getInitialWorldState(); final Transaction transaction = spec.getTransaction(); final MutableWorldState worldState = new DefaultMutableWorldState(initialWorldState); // Several of the GeneralStateTests check if the transaction could potentially // consume more gas than is left for the block it's attempted to be included in. // This check is performed within the `BlockImporter` rather than inside the // `TransactionProcessor`, so these tests are skipped. if (transaction.getGasLimit() > blockHeader.getGasLimit() - blockHeader.getGasUsed()) { return; } final MainnetTransactionProcessor processor = transactionProcessor(spec.getFork()); final WorldUpdater worldStateUpdater = worldState.updater(); final ReferenceTestBlockchain blockchain = new ReferenceTestBlockchain(blockHeader.getNumber()); final TransactionProcessingResult result = processor.processTransaction( blockchain, worldStateUpdater, blockHeader, transaction, blockHeader.getCoinbase(), new BlockHashLookup(blockHeader, blockchain), false, TransactionValidationParams.processingBlock()); final Account coinbase = worldStateUpdater.getOrCreate(spec.getBlockHeader().getCoinbase()); if (coinbase != null && coinbase.isEmpty() && shouldClearEmptyAccounts(spec.getFork())) { worldStateUpdater.deleteAccount(coinbase.getAddress()); } worldStateUpdater.commit(); // Check the world state root hash. final Hash expectedRootHash = spec.getExpectedRootHash(); assertThat(worldState.rootHash()) .withFailMessage("Unexpected world state root hash; computed state: %s", worldState) .isEqualTo(expectedRootHash); // Check the logs. final Hash expectedLogsHash = spec.getExpectedLogsHash(); Optional.ofNullable(expectedLogsHash) .ifPresent( expected -> { final List<Log> logs = result.getLogs(); assertThat(Hash.hash(RLP.encode(out -> out.writeList(logs, Log::writeTo)))) .withFailMessage("Unmatched logs hash. Generated logs: %s", logs) .isEqualTo(expected); }); } private static boolean shouldClearEmptyAccounts(final String eip) { return !SPECS_PRIOR_TO_DELETING_EMPTY_ACCOUNTS.contains(eip); } }
1
25,379
Is there something we can do to make this automatic? At the very least, can we add Shanghai, Cancun, etc. here now so that we don't forget them?
hyperledger-besu
java
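The review comment above asks whether the fork list could be maintained in one place and whether not-yet-shipped forks could be registered up front. Below is a hedged Java sketch of one way to centralize the list and pre-register future fork names; `ForkListDefaults` and `KNOWN_FORKS` are illustrative names, not Besu's actual API, and the post-London entries are speculative placeholders.

```java
import java.util.Arrays;
import java.util.List;

public class ForkListDefaults {
    // Single canonical ordering of forks; adding a new fork here updates the
    // default system property everywhere it is used. Names after London are
    // listed speculatively so they are not forgotten once they ship.
    private static final List<String> KNOWN_FORKS = Arrays.asList(
        "Frontier", "Homestead", "EIP150", "EIP158", "Byzantium",
        "Constantinople", "ConstantinopleFix", "Istanbul", "Berlin",
        "London", "Shanghai", "Cancun");

    static List<String> eipsToRun() {
        final String eips = System.getProperty(
            "test.ethereum.state.eips", String.join(",", KNOWN_FORKS));
        return Arrays.asList(eips.split(","));
    }

    public static void main(String[] args) {
        System.out.println(eipsToRun());
    }
}
```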
@@ -129,7 +129,7 @@ def add_arguments_to_parser(parser): parser.add_argument('--skip-db-cleanup', dest="skip_db_cleanup", action='store_true', - default=True, + default=False, required=False, help="Skip performing cleanup jobs on the database " "like removing unused files.")
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- """ Handler for the subcommand that is used to start and manage CodeChecker servers, which are used to query analysis report information. """ import argparse import errno import json import os import socket import sys from alembic import config from alembic import script from sqlalchemy.orm import sessionmaker from shared.ttypes import DBStatus from libcodechecker import generic_package_context from libcodechecker import generic_package_suppress_handler from libcodechecker import host_check from libcodechecker import output_formatters from libcodechecker import session_manager from libcodechecker import util from libcodechecker.analyze import analyzer_env from libcodechecker.logger import LoggerFactory from libcodechecker.logger import add_verbose_arguments from libcodechecker.server import server from libcodechecker.server import instance_manager from libcodechecker.server.database import database from libcodechecker.server.database import database_status from libcodechecker.server.database.config_db_model \ import IDENTIFIER as CONFIG_META from libcodechecker.server.database.config_db_model \ import Product as ORMProduct from libcodechecker.server.database.run_db_model \ import IDENTIFIER as RUN_META LOG = LoggerFactory.get_new_logger('SERVER') def get_argparser_ctor_args(): """ This method returns a dict containing the kwargs for constructing an argparse.ArgumentParser (either directly or as a subparser). """ return { 'prog': 'CodeChecker server', 'formatter_class': argparse.ArgumentDefaultsHelpFormatter, # Description is shown when the command's help is queried directly 'description': "The CodeChecker Web server is used to handle the " "storage and navigation of analysis results. A " "started server can be connected to via a Web " "browser, or by using the 'CodeChecker cmd' " "command-line client.", # Help is shown when the "parent" CodeChecker command lists the # individual subcommands. 'help': "Start and manage the CodeChecker Web server." } def add_arguments_to_parser(parser): """ Add the subcommand's arguments to the given argparse.ArgumentParser. """ # TODO: --workspace is an outdated concept in 'store'. Later on, # it shall be deprecated, as changes to db_handler commence. parser.add_argument('-w', '--workspace', type=str, dest="workspace", default=util.get_default_workspace(), required=False, help="Directory where CodeChecker can store analysis " "result related data, such as the database. " "(Cannot be specified at the same time with " "'--sqlite' or '--config-directory'.)") parser.add_argument('-f', '--config-directory', type=str, dest="config_directory", default=util.get_default_workspace(), required=False, help="Directory where CodeChecker server should read " "server-specific configuration (such as " "authentication settings, SSL certificate" " (cert.pem) and key (key.pem)) from.") parser.add_argument('--host', type=str, dest="listen_address", default="localhost", required=False, help="The IP address or hostname of the server on " "which it should listen for connections.") # TODO: -v/--view-port is too verbose. The server's -p/--port is used # symmetrically in 'CodeChecker cmd' anyways. parser.add_argument('-v', '--view-port', # TODO: <- Deprecate and remove. 
'-p', '--port', type=int, dest="view_port", metavar='PORT', default=8001, required=False, help="The port which will be used as listen port for " "the server.") # TODO: This should be removed later on, in favour of --host. parser.add_argument('--not-host-only', dest="not_host_only", action="store_true", required=False, help="If specified, storing and viewing the results " "will be possible not only by browsers and " "clients running locally, but to everyone, who " "can access the server over the Internet. " "(Equivalent to specifying '--host \"\"'.)") parser.add_argument('--skip-db-cleanup', dest="skip_db_cleanup", action='store_true', default=True, required=False, help="Skip performing cleanup jobs on the database " "like removing unused files.") dbmodes = parser.add_argument_group("configuration database arguments") dbmodes = dbmodes.add_mutually_exclusive_group(required=False) dbmodes.add_argument('--sqlite', type=str, dest="sqlite", metavar='SQLITE_FILE', default=os.path.join( '<CONFIG_DIRECTORY>', "config.sqlite"), required=False, help="Path of the SQLite database file to use.") dbmodes.add_argument('--postgresql', dest="postgresql", action='store_true', required=False, default=argparse.SUPPRESS, help="Specifies that a PostgreSQL database is to be " "used instead of SQLite. See the \"PostgreSQL " "arguments\" section on how to configure the " "database connection.") pgsql = parser.add_argument_group("PostgreSQL arguments", "Values of these arguments are ignored, " "unless '--postgresql' is specified!") # TODO: --dbSOMETHING arguments are kept to not break interface from # old command. Database using commands such as "CodeChecker store" no # longer supports these --- it would be ideal to break and remove args # with this style and only keep --db-SOMETHING. pgsql.add_argument('--dbaddress', '--db-host', type=str, dest="dbaddress", default="localhost", required=False, help="Database server address.") pgsql.add_argument('--dbport', '--db-port', type=int, dest="dbport", default=5432, required=False, help="Database server port.") pgsql.add_argument('--dbusername', '--db-username', type=str, dest="dbusername", default='codechecker', required=False, help="Username to use for connection.") pgsql.add_argument('--dbname', '--db-name', type=str, dest="dbname", default="config", required=False, help="Name of the database to use.") root_account = parser.add_argument_group( "root account arguments", "Servers automatically create a root user to access the server's " "configuration via the clients. This user is created at first start " "and saved in the CONFIG_DIRECTORY, and the credentials are printed " "to the server's standard output. The plaintext credentials are " "NEVER accessible again.") root_account.add_argument('--reset-root', dest="reset_root", action='store_true', default=argparse.SUPPRESS, required=False, help="Force the server to recreate the master " "superuser (root) account name and " "password. The previous credentials will " "be invalidated, and the new ones will be " "printed to the standard output.") root_account.add_argument('--force-authentication', dest="force_auth", action='store_true', default=argparse.SUPPRESS, required=False, help="Force the server to run in " "authentication requiring mode, despite " "the configuration value in " "'session_config.json'. 
This is needed " "if you need to edit the product " "configuration of a server that would not " "require authentication otherwise.") instance_mgmnt = parser.add_argument_group("running server management") instance_mgmnt = instance_mgmnt. \ add_mutually_exclusive_group(required=False) instance_mgmnt.add_argument('-l', '--list', dest="list", action='store_true', default=argparse.SUPPRESS, required=False, help="List the servers that has been started " "by you.") # TODO: '-s' was removed from 'quickcheck', it shouldn't be here either? instance_mgmnt.add_argument('-s', '--stop', dest="stop", action='store_true', default=argparse.SUPPRESS, required=False, help="Stops the server associated with " "the given view-port and workspace.") instance_mgmnt.add_argument('--stop-all', dest="stop_all", action='store_true', default=argparse.SUPPRESS, required=False, help="Stops all of your running CodeChecker " "server instances.") database_mgmnt = parser.add_argument_group( "Database management arguments.", """WARNING these commands needs to be called with the same workspace and configuration arguments as the server so the configuration database will be found which is required for the schema migration. Migration can be done without a running server but pay attention to use the same arguments which will be used to start the server. NOTE: Before migration it is advised to create a full a backup of the product databases. """) database_mgmnt = database_mgmnt. \ add_mutually_exclusive_group(required=False) database_mgmnt.add_argument('--db-status', type=str, dest="status", action='store', default=argparse.SUPPRESS, required=False, help="Name of the product to get " "the database status for. " "Use 'all' to list the database " "statuses for all of the products.") database_mgmnt.add_argument('--db-upgrade-schema', type=str, dest='product_to_upgrade', action='store', default=argparse.SUPPRESS, required=False, help="Name of the product to upgrade to the " "latest database schema available in " "the package. Use 'all' to upgrade all " "of the products." "NOTE: Before migration it is advised" " to create a full backup of " "the product databases.") add_verbose_arguments(parser) def __handle(args): """Custom handler for 'server' so custom error messages can be printed without having to capture 'parser' in main.""" def arg_match(options): return util.arg_match(options, sys.argv[1:]) # See if there is a "PostgreSQL argument" specified in the invocation # without '--postgresql' being there. There is no way to distinguish # a default argument and a deliberately specified argument without # inspecting sys.argv. options = ['--dbaddress', '--dbport', '--dbusername', '--dbname', '--db-host', '--db-port', '--db-username', '--db-name'] psql_args_matching = arg_match(options) if any(psql_args_matching) and\ 'postgresql' not in args: first_matching_arg = next(iter([match for match in psql_args_matching])) parser.error("argument {0}: not allowed without " "argument --postgresql".format(first_matching_arg)) # parser.error() terminates with return code 2. # --not-host-only is a "shortcut", actually a to-be-deprecated # call which means '--host ""'. # TODO: Actually deprecate --not-host-only later on. options = ['--not-host-only', '--host'] if set(arg_match(options)) == set(options): parser.error("argument --not-host-only: not allowed with " "argument --host, as it is a shortcut to --host " "\"\"") else: # Apply the shortcut. if len(arg_match(['--not-host-only'])) > 0: args.listen_address = "" # Listen on every interface. 
# --not-host-only is just a shortcut optstring, no actual use # is intended later on. delattr(args, 'not_host_only') # --workspace and --sqlite cannot be specified either, as # both point to a database location. options = ['--sqlite', '--workspace'] options_short = ['--sqlite', '-w'] if set(arg_match(options)) == set(options) or \ set(arg_match(options_short)) == set(options_short): parser.error("argument --sqlite: not allowed with " "argument --workspace") # --workspace and --config-directory also aren't allowed together now, # the latter one is expected to replace the earlier. options = ['--config-directory', '--workspace'] options_short = ['--config-directory', '-w'] if set(arg_match(options)) == set(options) or \ set(arg_match(options_short)) == set(options_short): parser.error("argument --config-directory: not allowed with " "argument --workspace") # If workspace is specified, sqlite is workspace/config.sqlite # and config_directory is the workspace directory. if len(arg_match(['--workspace', '-w'])) > 0: args.config_directory = args.workspace args.sqlite = os.path.join(args.workspace, 'config.sqlite') setattr(args, 'dbdatadir', os.path.join(args.workspace, 'pgsql_data')) # Workspace should not exist as a Namespace key. delattr(args, 'workspace') if '<CONFIG_DIRECTORY>' in args.sqlite: # Replace the placeholder variable with the actual value. args.sqlite = args.sqlite.replace('<CONFIG_DIRECTORY>', args.config_directory) if 'postgresql' not in args: # Later called database modules need the argument to be actually # present, even though the default is suppressed in the optstring. setattr(args, 'postgresql', False) # This is not needed by the database starter as we are # running SQLite. if 'dbdatadir' in args: delattr(args, 'dbdatadir') else: # If --postgresql is given, --sqlite is useless. delattr(args, 'sqlite') # If everything is fine, do call the handler for the subcommand. main(args) parser.set_defaults(func=__handle) def print_prod_status(prod_status): """ Print the database statuses for each of the products. """ header = ['Product endpoint', 'Database status', 'Database location', 'Schema version in the database', 'Schema version in the package'] rows = [] for k, v in prod_status.items(): db_status, schema_ver, package_ver, db_location = v db_status_msg = database_status.db_status_msg.get(db_status) if schema_ver == package_ver: schema_ver += " (up to date)" rows.append([k, db_status_msg, db_location, schema_ver, package_ver]) prod_status = output_formatters.twodim_to_str('table', header, rows, sortby_column_number=0) print(prod_status) def get_schema_version_from_package(migration_root): """ Returns the latest schema version in the package. """ cfg = config.Config() cfg.set_main_option("script_location", migration_root) pckg_schema_ver = script.ScriptDirectory.from_config(cfg) return pckg_schema_ver.get_current_head() def check_product_db_status(cfg_sql_server, context): """ Check the products for database statuses. 
:returns: dictionary of product endpoints with database statuses """ migration_root = context.run_migration_root engine = cfg_sql_server.create_engine() config_session = sessionmaker(bind=engine) sess = config_session() try: products = sess.query(ORMProduct).all() except Exception as ex: LOG.debug(ex) LOG.error("Failed to get product configurations from the database.") LOG.error("Please check your command arguments.") sys.exit(1) package_schema = get_schema_version_from_package(migration_root) db_errors = [DBStatus.FAILED_TO_CONNECT, DBStatus.MISSING, DBStatus.SCHEMA_INIT_ERROR, DBStatus.SCHEMA_MISSING] prod_status = {} for pd in products: db = database.SQLServer.from_connection_string(pd.connection, RUN_META, migration_root, interactive=False) db_location = db.get_db_location() ret = db.connect() s_ver = db.get_schema_version() if s_ver in db_errors: s_ver = None prod_status[pd.endpoint] = (ret, s_ver, package_schema, db_location) sess.commit() sess.close() engine.dispose() return prod_status def __db_status_check(cfg_sql_server, context, product_name=None): """ Check and print database statuses for the given product. """ if not product_name: return 0 LOG.debug("Checking database status for " + product_name + " product.") prod_statuses = check_product_db_status(cfg_sql_server, context) if product_name != 'all': avail = prod_statuses.get(product_name) if not avail: LOG.error("No product was found with this endpoint: " + str(product_name)) return 1 prod_statuses = {k: v for k, v in prod_statuses.items() if k == product_name} print_prod_status(prod_statuses) return 0 def __db_migration(cfg_sql_server, context, product_to_upgrade='all'): """ Handle database management. Schema checking and migration. """ LOG.info("Preparing schema upgrade for " + str(product_to_upgrade)) product_name = product_to_upgrade prod_statuses = check_product_db_status(cfg_sql_server, context) prod_to_upgrade = [] if product_name != 'all': avail = prod_statuses.get(product_name) if not avail: LOG.error("No product was found with this endpoint: " + product_name) return 1 prod_to_upgrade.append(product_name) else: prod_to_upgrade = list(prod_statuses.keys()) migration_root = context.run_migration_root LOG.warning("Please note after migration only " "newer CodeChecker versions can be used " "to start the server") LOG.warning("It is advised to make a full backup of your " "run databases.") for prod in prod_to_upgrade: LOG.info("========================") LOG.info("Checking: " + prod) engine = cfg_sql_server.create_engine() config_session = sessionmaker(bind=engine) sess = config_session() product = sess.query(ORMProduct).filter( ORMProduct.endpoint == prod).first() db = database.SQLServer.from_connection_string(product.connection, RUN_META, migration_root, interactive=False) db_status = db.connect() msg = database_status.db_status_msg.get(db_status, 'Unknown database status') LOG.info(msg) if db_status == DBStatus.SCHEMA_MISSING: question = 'Do you want to initialize a new schema for ' \ + product.endpoint + '? Y(es)/n(o) ' if util.get_user_input(question): ret = db.connect(init=True) msg = database_status.db_status_msg.get( ret, 'Unknown database status') else: LOG.info("No schema initialization was done.") elif db_status == DBStatus.SCHEMA_MISMATCH_OK: question = 'Do you want to upgrade to new schema for ' \ + product.endpoint + '? 
Y(es)/n(o) ' if util.get_user_input(question): LOG.info("Upgrading schema ...") ret = db.upgrade() LOG.info("Done.") msg = database_status.db_status_msg.get( ret, 'Unknown database status') else: LOG.info("No schema migration was done.") sess.commit() sess.close() engine.dispose() LOG.info("========================") return 0 def __instance_management(args): """Handles the instance-manager commands --list/--stop/--stop-all.""" # TODO: The server stopping and listing must be revised on its invocation # once "workspace", as a concept, is removed. # QUESTION: What is the bestest way here to identify a server for the user? if 'list' in args: instances = instance_manager.list() instances_on_multiple_hosts = any(True for inst in instances if inst['hostname'] != socket.gethostname()) if not instances_on_multiple_hosts: head = ['Workspace', 'View port'] else: head = ['Workspace', 'Computer host', 'View port'] rows = [] for instance in instance_manager.list(): if not instances_on_multiple_hosts: rows.append((instance['workspace'], str(instance['port']))) else: rows.append((instance['workspace'], instance['hostname'] if instance['hostname'] != socket.gethostname() else '', str(instance['port']))) print("Your running CodeChecker servers:") print(output_formatters.twodim_to_str('table', head, rows)) elif 'stop' in args or 'stop_all' in args: for i in instance_manager.list(): if i['hostname'] != socket.gethostname(): continue # A STOP only stops the server associated with the given workspace # and view-port. if 'stop' in args and \ not (i['port'] == args.view_port and os.path.abspath(i['workspace']) == os.path.abspath(args.config_directory)): continue try: util.kill_process_tree(i['pid']) LOG.info("Stopped CodeChecker server running on port {0} " "in workspace {1} (PID: {2})". format(i['port'], i['workspace'], i['pid'])) except: # Let the exception come out if the commands fail LOG.error("Couldn't stop process PID #" + str(i['pid'])) raise def main(args): """ Start or manage a CodeChecker report server. """ if 'list' in args or 'stop' in args or 'stop_all' in args: __instance_management(args) sys.exit(0) # Actual server starting from this point. if not host_check.check_zlib(): raise Exception("zlib is not available on the system!") # WARNING # In case of SQLite args.dbaddress default value is used # for which the is_localhost should return true. if util.is_localhost(args.dbaddress) and \ not os.path.exists(args.config_directory): os.makedirs(args.config_directory) # Make sure the SQLite file can be created if it not exists. if 'sqlite' in args and \ not os.path.isdir(os.path.dirname(args.sqlite)): os.makedirs(os.path.dirname(args.sqlite)) if 'reset_root' in args: try: os.remove(os.path.join(args.config_directory, 'root.user')) LOG.info("Master superuser (root) credentials invalidated and " "deleted. New ones will be generated...") except OSError: # File doesn't exist. pass if 'force_auth' in args: LOG.info("'--force-authentication' was passed as a command-line " "option. 
The server will ask for users to authenticate!") context = generic_package_context.get_context() context.codechecker_workspace = args.config_directory session_manager.SessionManager.CodeChecker_Workspace = \ args.config_directory context.db_username = args.dbusername check_env = analyzer_env.get_check_env(context.path_env_extra, context.ld_lib_path_extra) cfg_sql_server = database.SQLServer.from_cmdline_args( vars(args), CONFIG_META, context.config_migration_root, interactive=True, env=check_env) LOG.info("Checking configuration database ...") db_status = cfg_sql_server.connect() db_status_msg = database_status.db_status_msg.get(db_status) LOG.info(db_status_msg) if db_status == DBStatus.SCHEMA_MISSING: LOG.debug("Config database schema is missing, initializing new.") db_status = cfg_sql_server.connect(init=True) if db_status != DBStatus.OK: LOG.error("Config database initialization failed!") LOG.error("Please check debug logs.") sys.exit(1) if db_status == DBStatus.SCHEMA_MISMATCH_NO: LOG.debug("Configuration database schema mismatch.") LOG.debug("No schema upgrade is possible.") sys.exit(1) if db_status == DBStatus.SCHEMA_MISMATCH_OK: LOG.debug("Configuration database schema mismatch.") LOG.debug("Schema upgrade is possible.") LOG.warning("Please note after migration only " "newer CodeChecker versions can be used" "to start the server") LOG.warning("It is advised to make a full backup of your " "configuration database") LOG.warning(cfg_sql_server.get_db_location()) question = 'Do you want to upgrade to the new schema?' \ ' Y(es)/n(o) ' if util.get_user_input(question): print("Upgrading schema ...") ret = cfg_sql_server.upgrade() msg = database_status.db_status_msg.get( ret, 'Unknown database status') print(msg) if ret != DBStatus.OK: LOG.error("Schema migration failed") syst.exit(ret) else: LOG.info("No schema migration was done.") sys.exit(0) if db_status == DBStatus.MISSING: LOG.error("Missing configuration database.") LOG.error("Server can not be started.") sys.exit(1) # Configuration database setup and check is needed before database # statuses can be checked. try: if args.status: ret = __db_status_check(cfg_sql_server, context, args.status) sys.exit(ret) except AttributeError: LOG.debug('Status was not in the arguments.') try: if args.product_to_upgrade: ret = __db_migration(cfg_sql_server, context, args.product_to_upgrade) sys.exit(ret) except AttributeError: LOG.debug('Product upgrade was not in the arguments.') # Create the main database link from the arguments passed over the # command line. default_product_path = os.path.join(args.config_directory, 'Default.sqlite') create_default_product = 'sqlite' in args and \ not os.path.exists(default_product_path) if create_default_product: # Create a default product and add it to the configuration database. LOG.debug("Create default product...") LOG.debug("Configuring schema and migration...") prod_server = database.SQLiteDatabase( default_product_path, RUN_META, context.run_migration_root, check_env) LOG.debug("Checking 'Default' product database.") db_status = prod_server.connect() if db_status != DBStatus.MISSING: db_status = prod_server.connect(init=True) LOG.error(database_status.db_status_msg.get(db_status)) if db_status != DBStatus.OK: LOG.error("Failed to configure default product") sys.exit(1) product_conn_string = prod_server.get_connection_string() server.add_initial_run_database( cfg_sql_server, product_conn_string) LOG.info("Product 'Default' at '{0}' created and set up." 
.format(default_product_path)) prod_statuses = check_product_db_status(cfg_sql_server, context) upgrade_available = {} for k, v in prod_statuses.items(): db_status, _, _, _ = v if db_status == DBStatus.SCHEMA_MISMATCH_OK or \ db_status == DBStatus.SCHEMA_MISSING: upgrade_available[k] = v if upgrade_available: print_prod_status(prod_statuses) LOG.warning("Multiple products can be upgraded, make a backup!") __db_migration(cfg_sql_server, context) prod_statuses = check_product_db_status(cfg_sql_server, context) print_prod_status(prod_statuses) non_ok_db = False for k, v in prod_statuses.items(): db_status, _, _, _ = v if db_status != DBStatus.OK: non_ok_db = True break if non_ok_db: msg = "There are some database issues. " \ "Do you want to start the " \ "server? Y(es)/n(o) " if not util.get_user_input(msg): sys.exit(1) # Start database viewer. checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs') checker_md_docs_map = os.path.join(checker_md_docs, 'checker_doc_map.json') with open(checker_md_docs_map, 'r') as dFile: checker_md_docs_map = json.load(dFile) package_data = {'www_root': context.www_root, 'doc_root': context.doc_root, 'checker_md_docs': checker_md_docs, 'checker_md_docs_map': checker_md_docs_map, 'version': context.package_git_tag} suppress_handler = generic_package_suppress_handler. \ GenericSuppressHandler(None, False) try: server.start_server(args.config_directory, package_data, args.view_port, cfg_sql_server, suppress_handler, args.listen_address, 'force_auth' in args, 'skip_db_cleanup' not in args, context, check_env) except socket.error as err: if err.errno == errno.EADDRINUSE: LOG.error("Server can't be started, maybe the given port number " "({}) is already used. Check the connection " "parameters.".format(args.view_port)) sys.exit(1) else: raise
1
8,853
For boolean values, I'd rather have `argparse.SUPPRESS` as the default value. No need to have a `(default: False)` in the help if it's a toggle.
Ericsson-codechecker
c
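The review comment above prefers `argparse.SUPPRESS` over `False` for boolean toggles: with `SUPPRESS`, the attribute only appears in the parsed namespace when the flag is actually given, and `ArgumentDefaultsHelpFormatter` omits the misleading `(default: False)` from the help text. A minimal standalone sketch of that pattern follows (the flag name is taken from the diff above; the rest is illustrative, not the actual CodeChecker parser):

```python
import argparse

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--skip-db-cleanup',
                    dest='skip_db_cleanup',
                    action='store_true',
                    default=argparse.SUPPRESS,
                    help="Skip performing cleanup jobs on the database.")

args = parser.parse_args(['--skip-db-cleanup'])
# With SUPPRESS the attribute exists only when the flag was passed,
# so callers check presence instead of value:
skip_cleanup = 'skip_db_cleanup' in args
print(skip_cleanup)  # True; parser.parse_args([]) would give False here
```

This matches how the surrounding server code already treats other suppressed flags, for example `'force_auth' in args`.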
@@ -251,6 +251,7 @@ func initSyncTest(t *testing.T, con consensus.Protocol, genFunc func(cst *hamt.C chainStore := chain.NewStore(chainDS, cst, &state.TreeStateLoader{}, calcGenBlk.Cid()) fetcher := th.NewTestFetcher() + fetcher.AddSourceBlocks(calcGenBlk) syncer := chain.NewSyncer(con, chainStore, fetcher, syncMode) // note we use same cst for on and offline for tests // Initialize stores to contain dstP.genesis block and state
1
package chain_test import ( "context" "testing" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-hamt-ipld" bstore "github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-exchange-offline" "github.com/libp2p/go-libp2p-peer" "github.com/filecoin-project/go-filecoin/actor/builtin" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/chain" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/gengen/util" "github.com/filecoin-project/go-filecoin/proofs/verification" "github.com/filecoin-project/go-filecoin/repo" "github.com/filecoin-project/go-filecoin/state" th "github.com/filecoin-project/go-filecoin/testhelpers" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" "github.com/filecoin-project/go-filecoin/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // This file contains test that use a full syncer dependency structure, including store and // consensus implementations. // See syncer_test.go for most syncer unit tests. // The tests here should probably be reworked to follow the unit test patterns, and this // integration test setup reserved for a few "sunny day" tests of integration specifically. type SyncerTestParams struct { // Chain diagram below. Note that blocks in the same tipset are in parentheses. // // genesis -> (link1blk1, link1blk2) -> (link2blk1, link2blk2, link2blk3) -> link3blk1 -> (link4blk1, link4blk2) // Blocks genesis *types.Block link1blk1, link1blk2 *types.Block link2blk1, link2blk2, link2blk3 *types.Block link3blk1 *types.Block link4blk1, link4blk2 *types.Block // Cids genCid cid.Cid genStateRoot, link1State, link2State, link3State, link4State cid.Cid // TipSets genTS, link1, link2, link3, link4 types.TipSet // utils cidGetter func() cid.Cid minerAddress address.Address minerOwnerAddress address.Address minerPeerID peer.ID } func initDSTParams() *SyncerTestParams { var err error minerAddress, err := address.NewActorAddress([]byte("miner")) if err != nil { panic(err) } minerOwnerAddress, err := address.NewActorAddress([]byte("minerOwner")) if err != nil { panic(err) } minerPeerID, err := th.RandPeerID() if err != nil { panic(err) } // Set up the test chain bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) cst := hamt.NewCborStore() genesis, err := initGenesis(minerAddress, minerOwnerAddress, minerPeerID, cst, bs) if err != nil { panic(err) } genCid := genesis.Cid() genTS := th.MustNewTipSet(genesis) // mock state root cids cidGetter := types.NewCidForTestGetter() genStateRoot := genesis.StateRoot return &SyncerTestParams{ minerAddress: minerAddress, minerOwnerAddress: minerOwnerAddress, minerPeerID: minerPeerID, genesis: genesis, genCid: genCid, genTS: genTS, cidGetter: cidGetter, genStateRoot: genStateRoot, } } // This function sets global variables according to the tests needs. The // test chain's basic structure is always the same, but some tests want // mocked stateRoots or parent weight calculations from different consensus protocols. 
func requireSetTestChain(t *testing.T, con consensus.Protocol, mockStateRoots bool, dstP *SyncerTestParams) { var err error // see powerTableForWidenTest minerPower := types.NewBytesAmount(25) totalPower := types.NewBytesAmount(100) mockSigner, _ := types.NewMockSignersAndKeyInfo(1) minerWorker := mockSigner.Addresses[0] fakeChildParams := th.FakeChildParams{ Parent: dstP.genTS, GenesisCid: dstP.genCid, StateRoot: dstP.genStateRoot, Consensus: con, MinerAddr: dstP.minerAddress, MinerWorker: minerWorker, Signer: mockSigner, } dstP.link1blk1 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link1blk1.Proof, dstP.link1blk1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link1blk2 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link1blk2.Proof, dstP.link1blk2.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link1 = th.RequireNewTipSet(t, dstP.link1blk1, dstP.link1blk2) if mockStateRoots { dstP.link1State = dstP.cidGetter() } else { dstP.link1State = dstP.genStateRoot } fakeChildParams.Parent = dstP.link1 fakeChildParams.StateRoot = dstP.link1State dstP.link2blk1 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link2blk1.Proof, dstP.link2blk1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link2blk2 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link2blk2.Proof, dstP.link2blk2.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) fakeChildParams.Nonce = uint64(1) dstP.link2blk3 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link2blk3.Proof, dstP.link2blk3.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link2 = th.RequireNewTipSet(t, dstP.link2blk1, dstP.link2blk2, dstP.link2blk3) if mockStateRoots { dstP.link2State = dstP.cidGetter() } else { dstP.link2State = dstP.genStateRoot } fakeChildParams.Parent = dstP.link2 fakeChildParams.StateRoot = dstP.link2State dstP.link3blk1 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link3blk1.Proof, dstP.link3blk1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link3 = th.RequireNewTipSet(t, dstP.link3blk1) if mockStateRoots { dstP.link3State = dstP.cidGetter() } else { dstP.link3State = dstP.genStateRoot } fakeChildParams.Parent = dstP.link3 fakeChildParams.StateRoot = dstP.link3State fakeChildParams.NullBlockCount = uint64(2) dstP.link4blk1 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link4blk1.Proof, dstP.link4blk1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) fakeChildParams.Nonce = uint64(1) dstP.link4blk2 = th.RequireMkFakeChildWithCon(t, fakeChildParams) dstP.link4blk2.Proof, dstP.link4blk2.Ticket, err = th.MakeProofAndWinningTicket(minerWorker, minerPower, totalPower, mockSigner) require.NoError(t, err) dstP.link4 = th.RequireNewTipSet(t, dstP.link4blk1, dstP.link4blk2) if mockStateRoots { dstP.link4State = dstP.cidGetter() } else { dstP.link4State = dstP.genStateRoot } } // loadSyncerFromRepo creates a store and syncer from an existing repo. 
func loadSyncerFromRepo(t *testing.T, r repo.Repo, dstP *SyncerTestParams) (*chain.Syncer, *th.TestFetcher) { powerTable := &th.TestView{} bs := bstore.NewBlockstore(r.Datastore()) cst := &hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} verifier := &verification.FakeVerifier{ VerifyPoStValid: true, } con := consensus.NewExpected(cst, bs, th.NewTestProcessor(), th.NewFakeBlockValidator(), powerTable, dstP.genCid, verifier, th.BlockTimeTest) calcGenBlk, err := initGenesis(dstP.minerAddress, dstP.minerOwnerAddress, dstP.minerPeerID, cst, bs) // flushes state require.NoError(t, err) calcGenBlk.StateRoot = dstP.genStateRoot chainDS := r.ChainDatastore() chainStore := chain.NewStore(chainDS, cst, &state.TreeStateLoader{}, calcGenBlk.Cid()) blockSource := th.NewTestFetcher() syncer := chain.NewSyncer(con, chainStore, blockSource, chain.Syncing) ctx := context.Background() err = chainStore.Load(ctx) require.NoError(t, err) return syncer, blockSource } // initSyncTestDefault creates and returns the datastructures (syncer, store, repo, fetcher) // needed to run tests. It also sets the global test variables appropriately. func initSyncTestDefault(t *testing.T, dstP *SyncerTestParams) (*chain.Syncer, *chain.Store, repo.Repo, *th.TestFetcher) { processor := th.NewTestProcessor() powerTable := &th.TestView{} r := repo.NewInMemoryRepo() bs := bstore.NewBlockstore(r.Datastore()) cst := &hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} verifier := &verification.FakeVerifier{ VerifyPoStValid: true, } con := consensus.NewExpected(cst, bs, processor, th.NewFakeBlockValidator(), powerTable, dstP.genCid, verifier, th.BlockTimeTest) requireSetTestChain(t, con, false, dstP) initGenesisWrapper := func(cst *hamt.CborIpldStore, bs bstore.Blockstore) (*types.Block, error) { return initGenesis(dstP.minerAddress, dstP.minerOwnerAddress, dstP.minerPeerID, cst, bs) } return initSyncTest(t, con, initGenesisWrapper, cst, bs, r, dstP, chain.Syncing) } func initSyncTest(t *testing.T, con consensus.Protocol, genFunc func(cst *hamt.CborIpldStore, bs bstore.Blockstore) (*types.Block, error), cst *hamt.CborIpldStore, bs bstore.Blockstore, r repo.Repo, dstP *SyncerTestParams, syncMode chain.SyncMode) (*chain.Syncer, *chain.Store, repo.Repo, *th.TestFetcher) { ctx := context.Background() calcGenBlk, err := genFunc(cst, bs) // flushes state require.NoError(t, err) calcGenBlk.StateRoot = dstP.genStateRoot chainDS := r.ChainDatastore() chainStore := chain.NewStore(chainDS, cst, &state.TreeStateLoader{}, calcGenBlk.Cid()) fetcher := th.NewTestFetcher() syncer := chain.NewSyncer(con, chainStore, fetcher, syncMode) // note we use same cst for on and offline for tests // Initialize stores to contain dstP.genesis block and state calcGenTS := th.RequireNewTipSet(t, calcGenBlk) genTsas := &chain.TipSetAndState{ TipSet: calcGenTS, TipSetStateRoot: dstP.genStateRoot, } require.NoError(t, chainStore.PutTipSetAndState(ctx, genTsas)) err = chainStore.SetHead(ctx, calcGenTS) // Initialize chainStore store with correct dstP.genesis require.NoError(t, err) requireHead(t, chainStore, calcGenTS) requireTsAdded(t, chainStore, calcGenTS) return syncer, chainStore, r, fetcher } func containsTipSet(tsasSlice []*chain.TipSetAndState, ts types.TipSet) bool { for _, tsas := range tsasSlice { if tsas.TipSet.String() == ts.String() { //bingo return true } } return false } type requireTsAddedChainStore interface { GetTipSet(types.TipSetKey) (types.TipSet, error) GetTipSetAndStatesByParentsAndHeight(string, uint64) 
([]*chain.TipSetAndState, error) } func requireTsAdded(t *testing.T, chain requireTsAddedChainStore, ts types.TipSet) { h, err := ts.Height() require.NoError(t, err) // Tip Index correctly updated gotTs, err := chain.GetTipSet(ts.Key()) require.NoError(t, err) require.Equal(t, ts, gotTs) parent, err := ts.Parents() require.NoError(t, err) childTsasSlice, err := chain.GetTipSetAndStatesByParentsAndHeight(parent.String(), h) require.NoError(t, err) require.True(t, containsTipSet(childTsasSlice, ts)) } func requireHead(t *testing.T, chain HeadAndTipsetGetter, head types.TipSet) { require.Equal(t, head, requireHeadTipset(t, chain)) } func assertHead(t *testing.T, chain HeadAndTipsetGetter, head types.TipSet) { headTipSet, err := chain.GetTipSet(chain.GetHead()) assert.NoError(t, err) assert.Equal(t, head, headTipSet) } func requirePutBlocks(_ *testing.T, f *th.TestFetcher, blocks ...*types.Block) types.TipSetKey { var cids []cid.Cid for _, block := range blocks { c := block.Cid() cids = append(cids, c) } f.AddSourceBlocks(blocks...) return types.NewTipSetKey(cids...) } // Syncer is capable of recovering from a fork reorg after Load. // See https://github.com/filecoin-project/go-filecoin/issues/1148#issuecomment-432008060 func TestLoadFork(t *testing.T) { tf.UnitTest(t) dstP := initDSTParams() syncer, chainStore, r, blockSource := initSyncTestDefault(t, dstP) ctx := context.Background() // Set up chain store to have standard chain up to dstP.link2 _ = requirePutBlocks(t, blockSource, dstP.link1.ToSlice()...) cids2 := requirePutBlocks(t, blockSource, dstP.link2.ToSlice()...) err := syncer.HandleNewTipset(ctx, cids2) require.NoError(t, err) // Now sync the store with a heavier fork, forking off dstP.link1. forkbase := th.RequireNewTipSet(t, dstP.link2blk1) signer, ki := types.NewMockSignersAndKeyInfo(2) minerWorker, err := ki[0].Address() require.NoError(t, err) fakeChildParams := th.FakeChildParams{ Parent: forkbase, GenesisCid: dstP.genCid, MinerAddr: dstP.minerAddress, Nonce: uint64(1), StateRoot: dstP.genStateRoot, Signer: signer, MinerWorker: minerWorker, } forklink1blk1 := th.RequireMkFakeChild(t, fakeChildParams) fakeChildParams.Nonce = uint64(1) forklink1blk2 := th.RequireMkFakeChild(t, fakeChildParams) fakeChildParams.Nonce = uint64(2) forklink1blk3 := th.RequireMkFakeChild(t, fakeChildParams) forklink1 := th.RequireNewTipSet(t, forklink1blk1, forklink1blk2, forklink1blk3) fakeChildParams.Parent = forklink1 fakeChildParams.Nonce = uint64(0) forklink2blk1 := th.RequireMkFakeChild(t, fakeChildParams) fakeChildParams.Nonce = uint64(1) forklink2blk2 := th.RequireMkFakeChild(t, fakeChildParams) fakeChildParams.Nonce = uint64(2) forklink2blk3 := th.RequireMkFakeChild(t, fakeChildParams) forklink2 := th.RequireNewTipSet(t, forklink2blk1, forklink2blk2, forklink2blk3) fakeChildParams.Nonce = uint64(0) fakeChildParams.Parent = forklink2 forklink3blk1 := th.RequireMkFakeChild(t, fakeChildParams) fakeChildParams.Nonce = uint64(1) forklink3blk2 := th.RequireMkFakeChild(t, fakeChildParams) forklink3 := th.RequireNewTipSet(t, forklink3blk1, forklink3blk2) _ = requirePutBlocks(t, blockSource, forklink1.ToSlice()...) _ = requirePutBlocks(t, blockSource, forklink2.ToSlice()...) forkHead := requirePutBlocks(t, blockSource, forklink3.ToSlice()...) err = syncer.HandleNewTipset(ctx, forkHead) require.NoError(t, err) requireHead(t, chainStore, forklink3) // Put blocks in global IPLD blockstore // TODO #2128 make this cleaner along with broad test cleanup. 
bs := bstore.NewBlockstore(r.Datastore()) cst := &hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} requirePutBlocksToCborStore(t, cst, dstP.genTS.ToSlice()...) requirePutBlocksToCborStore(t, cst, dstP.link1.ToSlice()...) requirePutBlocksToCborStore(t, cst, dstP.link2.ToSlice()...) requirePutBlocksToCborStore(t, cst, forklink1.ToSlice()...) requirePutBlocksToCborStore(t, cst, forklink2.ToSlice()...) requirePutBlocksToCborStore(t, cst, forklink3.ToSlice()...) // Shut down store, reload and wire to syncer. loadSyncer, blockSource := loadSyncerFromRepo(t, r, dstP) // Test that the syncer can't sync a block on the old chain // without getting old blocks from network. i.e. the repo is trimmed // of non-heaviest chain blocks cids3 := requirePutBlocks(t, blockSource, dstP.link3.ToSlice()...) err = loadSyncer.HandleNewTipset(ctx, cids3) assert.Error(t, err) // Test that the syncer can sync a block on the heaviest chain // without getting old blocks from the network. fakeChildParams.Parent = forklink3 forklink4blk1 := th.RequireMkFakeChild(t, fakeChildParams) forklink4 := th.RequireNewTipSet(t, forklink4blk1) cidsFork4 := requirePutBlocks(t, blockSource, forklink4.ToSlice()...) err = loadSyncer.HandleNewTipset(ctx, cidsFork4) assert.NoError(t, err) } // Syncer handles MarketView weight comparisons. // Current issue: when creating miner mining with addr0, addr0's storage head isn't found in the blockstore // and I can't figure out why because we pass in the correct blockstore to createStorageMinerWithpower. func TestTipSetWeightDeep(t *testing.T) { tf.UnitTest(t) r := repo.NewInMemoryRepo() bs := bstore.NewBlockstore(r.Datastore()) cst := &hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} ctx := context.Background() mockSigner, ki := types.NewMockSignersAndKeyInfo(3) minerWorker1, err := ki[0].Address() require.NoError(t, err) minerWorker2, err := ki[1].Address() require.NoError(t, err) // set up dstP.genesis block with power genCfg := &gengen.GenesisCfg{ ProofsMode: types.TestProofsMode, Keys: 4, Miners: []*gengen.CreateStorageMinerConfig{ { NumCommittedSectors: 0, SectorSize: types.OneKiBSectorSize.Uint64(), }, { NumCommittedSectors: 10, SectorSize: types.OneKiBSectorSize.Uint64(), }, { NumCommittedSectors: 10, SectorSize: types.OneKiBSectorSize.Uint64(), }, { NumCommittedSectors: 980, SectorSize: types.OneKiBSectorSize.Uint64(), }, }, } totalPower := types.NewBytesAmount(1000).Mul(types.OneKiBSectorSize) info, err := gengen.GenGen(ctx, genCfg, cst, bs, 0) require.NoError(t, err) var calcGenBlk types.Block require.NoError(t, cst.Get(ctx, info.GenesisCid, &calcGenBlk)) chainStore := chain.NewStore(r.ChainDatastore(), cst, &state.TreeStateLoader{}, calcGenBlk.Cid()) verifier := &verification.FakeVerifier{ VerifyPoStValid: true, } con := consensus.NewExpected(cst, bs, th.NewTestProcessor(), th.NewFakeBlockValidator(), &th.TestView{}, calcGenBlk.Cid(), verifier, th.BlockTimeTest) // Initialize stores to contain dstP.genesis block and state calcGenTS := th.RequireNewTipSet(t, &calcGenBlk) genTsas := &chain.TipSetAndState{ TipSet: calcGenTS, TipSetStateRoot: calcGenBlk.StateRoot, } require.NoError(t, chainStore.PutTipSetAndState(ctx, genTsas)) err = chainStore.SetHead(ctx, calcGenTS) // Initialize chainStore with correct dstP.genesis require.NoError(t, err) requireHead(t, chainStore, calcGenTS) requireTsAdded(t, chainStore, calcGenTS) // Setup a fetcher for feeding blocks into the syncer. 
blockSource := th.NewTestFetcher() // Now sync the chainStore with consensus using a MarketView. verifier = &verification.FakeVerifier{ VerifyPoStValid: true, } con = consensus.NewExpected(cst, bs, th.NewTestProcessor(), th.NewFakeBlockValidator(), &consensus.MarketView{}, calcGenBlk.Cid(), verifier, th.BlockTimeTest) syncer := chain.NewSyncer(con, chainStore, blockSource, chain.Syncing) baseTS := requireHeadTipset(t, chainStore) // this is the last block of the bootstrapping chain creating miners require.Equal(t, 1, baseTS.Len()) bootstrapStateRoot := baseTS.ToSlice()[0].StateRoot pSt, err := state.LoadStateTree(ctx, cst, baseTS.ToSlice()[0].StateRoot, builtin.Actors) require.NoError(t, err) /* Test chain diagram and weight calcs */ // (Note f1b1 = fork 1 block 1) // // f1b1 -> {f1b2a, f1b2b} // // f2b1 -> f2b2 // // sw=starting weight, apw=added parent weight, mw=miner weight, ew=expected weight // w({blk}) = sw + apw + mw = sw + ew // w({fXb1}) = sw + 0 + 11 = sw + 11 // w({f1b1, f2b1}) = sw + 0 + 11 * 2 = sw + 22 // w({f1b2a, f1b2b}) = sw + 11 + 11 * 2 = sw + 33 // w({f2b2}) = sw + 11 + 108 = sw + 119 startingWeight, err := con.Weight(ctx, baseTS, pSt) require.NoError(t, err) wFun := func(ts types.TipSet) (uint64, error) { // No power-altering messages processed from here on out. // And so bootstrapSt correctly retrives power table for all // test blocks. return con.Weight(ctx, ts, pSt) } fakeChildParams := th.FakeChildParams{ Parent: baseTS, GenesisCid: calcGenBlk.Cid(), StateRoot: bootstrapStateRoot, Signer: mockSigner, MinerAddr: info.Miners[1].Address, MinerWorker: minerWorker1, } f1b1 := th.RequireMkFakeChildCore(t, fakeChildParams, wFun) f1b1.Proof, f1b1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker1, info.Miners[1].Power, totalPower, mockSigner) require.NoError(t, err) fakeChildParams.Nonce = uint64(1) fakeChildParams.MinerAddr = info.Miners[2].Address f2b1 := th.RequireMkFakeChildCore(t, fakeChildParams, wFun) f2b1.Proof, f2b1.Ticket, err = th.MakeProofAndWinningTicket(minerWorker1, info.Miners[2].Power, totalPower, mockSigner) require.NoError(t, err) tsShared := th.RequireNewTipSet(t, f1b1, f2b1) // Sync first tipset, should have weight 22 + starting sharedCids := requirePutBlocks(t, blockSource, f1b1, f2b1) err = syncer.HandleNewTipset(ctx, sharedCids) require.NoError(t, err) assertHead(t, chainStore, tsShared) measuredWeight, err := wFun(requireHeadTipset(t, chainStore)) require.NoError(t, err) expectedWeight := startingWeight + uint64(22000) assert.Equal(t, expectedWeight, measuredWeight) // fork 1 is heavier than the old head. fakeChildParams = th.FakeChildParams{ Parent: th.RequireNewTipSet(t, f1b1), GenesisCid: calcGenBlk.Cid(), StateRoot: bootstrapStateRoot, Signer: mockSigner, MinerAddr: info.Miners[1].Address, MinerWorker: minerWorker1, } f1b2a := th.RequireMkFakeChildCore(t, fakeChildParams, wFun) f1b2a.Proof, f1b2a.Ticket, err = th.MakeProofAndWinningTicket(minerWorker1, info.Miners[1].Power, totalPower, mockSigner) require.NoError(t, err) fakeChildParams.Nonce = uint64(1) fakeChildParams.MinerAddr = info.Miners[2].Address fakeChildParams.MinerWorker = minerWorker2 f1b2b := th.RequireMkFakeChildCore(t, fakeChildParams, wFun) f1b2b.Proof, f1b2b.Ticket, err = th.MakeProofAndWinningTicket(minerWorker2, info.Miners[2].Power, totalPower, mockSigner) require.NoError(t, err) f1 := th.RequireNewTipSet(t, f1b2a, f1b2b) f1Cids := requirePutBlocks(t, blockSource, f1.ToSlice()...) 
err = syncer.HandleNewTipset(ctx, f1Cids) require.NoError(t, err) assertHead(t, chainStore, f1) measuredWeight, err = wFun(requireHeadTipset(t, chainStore)) require.NoError(t, err) expectedWeight = startingWeight + uint64(33000) assert.Equal(t, expectedWeight, measuredWeight) // fork 2 has heavier weight because of addr3's power even though there // are fewer blocks in the tipset than fork 1. fakeChildParams = th.FakeChildParams{ Parent: th.RequireNewTipSet(t, f2b1), GenesisCid: calcGenBlk.Cid(), Signer: mockSigner, StateRoot: bootstrapStateRoot, MinerAddr: info.Miners[3].Address, MinerWorker: minerWorker2, } f2b2 := th.RequireMkFakeChildCore(t, fakeChildParams, wFun) f2b2.Proof, f2b2.Ticket, err = th.MakeProofAndWinningTicket(minerWorker2, info.Miners[3].Power, totalPower, mockSigner) require.NoError(t, err) f2 := th.RequireNewTipSet(t, f2b2) f2Cids := requirePutBlocks(t, blockSource, f2.ToSlice()...) err = syncer.HandleNewTipset(ctx, f2Cids) require.NoError(t, err) assertHead(t, chainStore, f2) measuredWeight, err = wFun(requireHeadTipset(t, chainStore)) require.NoError(t, err) expectedWeight = startingWeight + uint64(119000) assert.Equal(t, expectedWeight, measuredWeight) } func initGenesis(minerAddress address.Address, minerOwnerAddress address.Address, minerPeerID peer.ID, cst *hamt.CborIpldStore, bs bstore.Blockstore) (*types.Block, error) { return consensus.MakeGenesisFunc( consensus.MinerActor(minerAddress, minerOwnerAddress, minerPeerID, types.ZeroAttoFIL, types.OneKiBSectorSize), )(cst, bs) }
1
20,355
The genesis block needs to exist in the store the fetcher pulls from, because the fetcher stops fetching once it finds a block it has already seen, and that block can sometimes be the genesis block.
filecoin-project-venus
go
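Note on the record above: a minimal sketch of what the review comment implies for the test setup, reusing the helpers that already appear in the test fragment (requirePutBlocks, blockSource, dstP.genTS); it is a fragment under those assumptions, not code taken from the project.

// Hypothetical fix sketched from the review comment: seed the fetcher's
// block source with the genesis tipset, mirroring the existing
// requirePutBlocksToCborStore(t, cst, dstP.genTS.ToSlice()...) call for the
// CBOR store. The fetcher stops once it reaches a block it has already seen,
// and that block can be the genesis block, so it must be available here.
requirePutBlocks(t, blockSource, dstP.genTS.ToSlice()...)

// The chain under test is then fed to the syncer exactly as in the original test.
cids3 := requirePutBlocks(t, blockSource, dstP.link3.ToSlice()...)
err = loadSyncer.HandleNewTipset(ctx, cids3)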
@@ -1111,7 +1111,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { return err } - err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) + err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0600) if err != nil { return err }
1
// +build linux package libcontainer import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "os" "os/exec" "path/filepath" "reflect" "strings" "sync" "syscall" // only for SysProcAttr and Signal "time" securejoin "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" "github.com/opencontainers/runtime-spec/specs-go" criurpc "github.com/checkpoint-restore/go-criu/rpc" "github.com/golang/protobuf/proto" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) const stdioFdCount = 3 type linuxContainer struct { id string root string config *configs.Config cgroupManager cgroups.Manager intelRdtManager intelrdt.Manager initPath string initArgs []string initProcess parentProcess initProcessStartTime uint64 criuPath string newuidmapPath string newgidmapPath string m sync.Mutex criuVersion int state containerState created time.Time } // State represents a running container's state type State struct { BaseState // Platform specific fields below here // Specified if the container was started under the rootless mode. // Set to true if BaseState.Config.RootlessEUID && BaseState.Config.RootlessCgroups Rootless bool `json:"rootless"` // Path to all the cgroups setup for a container. Key is cgroup subsystem name // with the value as the path. CgroupPaths map[string]string `json:"cgroup_paths"` // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type // with the value as the path. NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"` // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore ExternalDescriptors []string `json:"external_descriptors,omitempty"` // Intel RDT "resource control" filesystem path IntelRdtPath string `json:"intel_rdt_path"` } // Container is a libcontainer container object. // // Each container is thread-safe within the same process. Since a container can // be destroyed by a separate process, any function may return that the container // was not found. type Container interface { BaseContainer // Methods below here are platform specific // Checkpoint checkpoints the running container's state to disk using the criu(8) utility. // // errors: // Systemerror - System error. Checkpoint(criuOpts *CriuOpts) error // Restore restores the checkpointed container to a running state using the criu(8) utility. // // errors: // Systemerror - System error. Restore(process *Process, criuOpts *CriuOpts) error // If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses // the execution of any user processes. Asynchronously, when the container finished being paused the // state is changed to PAUSED. // If the Container state is PAUSED, do nothing. // // errors: // ContainerNotExists - Container no longer exists, // ContainerNotRunning - Container not running or created, // Systemerror - System error. Pause() error // If the Container state is PAUSED, resumes the execution of any user processes in the // Container before setting the Container state to RUNNING. // If the Container state is RUNNING, do nothing. // // errors: // ContainerNotExists - Container no longer exists, // ContainerNotPaused - Container is not paused, // Systemerror - System error. 
Resume() error // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification. // // errors: // Systemerror - System error. NotifyOOM() (<-chan struct{}, error) // NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level // // errors: // Systemerror - System error. NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) } // ID returns the container's unique ID func (c *linuxContainer) ID() string { return c.id } // Config returns the container's configuration func (c *linuxContainer) Config() configs.Config { return *c.config } func (c *linuxContainer) Status() (Status, error) { c.m.Lock() defer c.m.Unlock() return c.currentStatus() } func (c *linuxContainer) State() (*State, error) { c.m.Lock() defer c.m.Unlock() return c.currentState() } func (c *linuxContainer) OCIState() (*specs.State, error) { c.m.Lock() defer c.m.Unlock() return c.currentOCIState() } func (c *linuxContainer) Processes() ([]int, error) { pids, err := c.cgroupManager.GetAllPids() if err != nil { return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups") } return pids, nil } func (c *linuxContainer) Stats() (*Stats, error) { var ( err error stats = &Stats{} ) if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { return stats, newSystemErrorWithCause(err, "getting container stats from cgroups") } if c.intelRdtManager != nil { if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil { return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats") } } for _, iface := range c.config.Networks { switch iface.Type { case "veth": istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) if err != nil { return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName) } stats.Interfaces = append(stats.Interfaces, istats) } } return stats, nil } func (c *linuxContainer) Set(config configs.Config) error { c.m.Lock() defer c.m.Unlock() status, err := c.currentStatus() if err != nil { return err } if status == Stopped { return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning) } if err := c.cgroupManager.Set(&config); err != nil { // Set configs back if err2 := c.cgroupManager.Set(c.config); err2 != nil { logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2) } return err } if c.intelRdtManager != nil { if err := c.intelRdtManager.Set(&config); err != nil { // Set configs back if err2 := c.intelRdtManager.Set(c.config); err2 != nil { logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2) } return err } } // After config setting succeed, update config and states c.config = &config _, err = c.updateState(nil) return err } func (c *linuxContainer) Start(process *Process) error { c.m.Lock() defer c.m.Unlock() if process.Init { if err := c.createExecFifo(); err != nil { return err } } if err := c.start(process); err != nil { if process.Init { c.deleteExecFifo() } return err } return nil } func (c *linuxContainer) Run(process *Process) error { if err := c.Start(process); err != nil { return err } if process.Init { return c.exec() } return nil } func (c *linuxContainer) Exec() error { c.m.Lock() defer c.m.Unlock() return c.exec() } func (c *linuxContainer) exec() error { path := filepath.Join(c.root, execFifoFilename) fifoOpen := make(chan 
struct{}) select { case <-awaitProcessExit(c.initProcess.pid(), fifoOpen): return errors.New("container process is already dead") case result := <-awaitFifoOpen(path): close(fifoOpen) if result.err != nil { return result.err } f := result.file defer f.Close() if err := readFromExecFifo(f); err != nil { return err } return os.Remove(path) } } func readFromExecFifo(execFifo io.Reader) error { data, err := ioutil.ReadAll(execFifo) if err != nil { return err } if len(data) <= 0 { return fmt.Errorf("cannot start an already running container") } return nil } func awaitProcessExit(pid int, exit <-chan struct{}) <-chan struct{} { isDead := make(chan struct{}) go func() { for { select { case <-exit: return case <-time.After(time.Millisecond * 100): stat, err := system.Stat(pid) if err != nil || stat.State == system.Zombie { close(isDead) return } } } }() return isDead } func awaitFifoOpen(path string) <-chan openResult { fifoOpened := make(chan openResult) go func() { f, err := os.OpenFile(path, os.O_RDONLY, 0) if err != nil { fifoOpened <- openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")} return } fifoOpened <- openResult{file: f} }() return fifoOpened } type openResult struct { file *os.File err error } func (c *linuxContainer) start(process *Process) error { parent, err := c.newParentProcess(process) if err != nil { return newSystemErrorWithCause(err, "creating new parent process") } parent.forwardChildLogs() if err := parent.start(); err != nil { // terminate the process to ensure that it properly is reaped. if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCause(err, "starting container process") } // generate a timestamp indicating when the container was started c.created = time.Now().UTC() if process.Init { c.state = &createdState{ c: c, } state, err := c.updateState(parent) if err != nil { return err } c.initProcessStartTime = state.InitProcessStartTime if c.config.Hooks != nil { s, err := c.currentOCIState() if err != nil { return err } for i, hook := range c.config.Hooks.Poststart { if err := hook.Run(s); err != nil { if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCausef(err, "running poststart hook %d", i) } } } } return nil } func (c *linuxContainer) Signal(s os.Signal, all bool) error { if all { return signalAllProcesses(c.cgroupManager, s) } status, err := c.currentStatus() if err != nil { return err } // to avoid a PID reuse attack if status == Running || status == Created || status == Paused { if err := c.initProcess.signal(s); err != nil { return newSystemErrorWithCause(err, "signaling init process") } return nil } return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning) } func (c *linuxContainer) createExecFifo() error { rootuid, err := c.Config().HostRootUID() if err != nil { return err } rootgid, err := c.Config().HostRootGID() if err != nil { return err } fifoName := filepath.Join(c.root, execFifoFilename) if _, err := os.Stat(fifoName); err == nil { return fmt.Errorf("exec fifo %s already exists", fifoName) } oldMask := unix.Umask(0000) if err := unix.Mkfifo(fifoName, 0622); err != nil { unix.Umask(oldMask) return err } unix.Umask(oldMask) return os.Chown(fifoName, rootuid, rootgid) } func (c *linuxContainer) deleteExecFifo() { fifoName := filepath.Join(c.root, execFifoFilename) os.Remove(fifoName) } // includeExecFifo opens the container's execfifo as a pathfd, so that the // container cannot access the 
statedir (and the FIFO itself remains // un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited // fd, with _LIBCONTAINER_FIFOFD set to its fd number. func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error { fifoName := filepath.Join(c.root, execFifoFilename) fifoFd, err := unix.Open(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { return err } cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fifoFd), fifoName)) cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_FIFOFD=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) return nil } func (c *linuxContainer) newParentProcess(p *Process) (parentProcess, error) { parentInitPipe, childInitPipe, err := utils.NewSockPair("init") if err != nil { return nil, newSystemErrorWithCause(err, "creating new init pipe") } messageSockPair := filePair{parentInitPipe, childInitPipe} parentLogPipe, childLogPipe, err := os.Pipe() if err != nil { return nil, fmt.Errorf("Unable to create the log pipe: %s", err) } logFilePair := filePair{parentLogPipe, childLogPipe} cmd, err := c.commandTemplate(p, childInitPipe, childLogPipe) if err != nil { return nil, newSystemErrorWithCause(err, "creating new command template") } if !p.Init { return c.newSetnsProcess(p, cmd, messageSockPair, logFilePair) } // We only set up fifoFd if we're not doing a `runc exec`. The historic // reason for this is that previously we would pass a dirfd that allowed // for container rootfs escape (and not doing it in `runc exec` avoided // that problem), but we no longer do that. However, there's no need to do // this for `runc exec` so we just keep it this way to be safe. if err := c.includeExecFifo(cmd); err != nil { return nil, newSystemErrorWithCause(err, "including execfifo in cmd.Exec setup") } return c.newInitProcess(p, cmd, messageSockPair, logFilePair) } func (c *linuxContainer) commandTemplate(p *Process, childInitPipe *os.File, childLogPipe *os.File) (*exec.Cmd, error) { cmd := exec.Command(c.initPath, c.initArgs[1:]...) cmd.Args[0] = c.initArgs[0] cmd.Stdin = p.Stdin cmd.Stdout = p.Stdout cmd.Stderr = p.Stderr cmd.Dir = c.config.Rootfs if cmd.SysProcAttr == nil { cmd.SysProcAttr = &syscall.SysProcAttr{} } cmd.Env = append(cmd.Env, fmt.Sprintf("GOMAXPROCS=%s", os.Getenv("GOMAXPROCS"))) cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...) if p.ConsoleSocket != nil { cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket) cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), ) } cmd.ExtraFiles = append(cmd.ExtraFiles, childInitPipe) cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), fmt.Sprintf("_LIBCONTAINER_STATEDIR=%s", c.root), ) cmd.ExtraFiles = append(cmd.ExtraFiles, childLogPipe) cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_LOGPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), fmt.Sprintf("_LIBCONTAINER_LOGLEVEL=%s", p.LogLevel), ) // NOTE: when running a container with no PID namespace and the parent process spawning the container is // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason // even with the parent still running. 
if c.config.ParentDeathSignal > 0 { cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) } return cmd, nil } func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*initProcess, error) { cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard)) nsMaps := make(map[configs.NamespaceType]string) for _, ns := range c.config.Namespaces { if ns.Path != "" { nsMaps[ns.Type] = ns.Path } } _, sharePidns := nsMaps[configs.NEWPID] data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps) if err != nil { return nil, err } init := &initProcess{ cmd: cmd, messageSockPair: messageSockPair, logFilePair: logFilePair, manager: c.cgroupManager, intelRdtManager: c.intelRdtManager, config: c.newInitConfig(p), container: c, process: p, bootstrapData: data, sharePidns: sharePidns, } c.initProcess = init return init, nil } func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*setnsProcess, error) { cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns)) state, err := c.currentState() if err != nil { return nil, newSystemErrorWithCause(err, "getting container's current state") } // for setns process, we don't have to set cloneflags as the process namespaces // will only be set via setns syscall data, err := c.bootstrapData(0, state.NamespacePaths) if err != nil { return nil, err } return &setnsProcess{ cmd: cmd, cgroupPaths: c.cgroupManager.GetPaths(), rootlessCgroups: c.config.RootlessCgroups, intelRdtPath: state.IntelRdtPath, messageSockPair: messageSockPair, logFilePair: logFilePair, config: c.newInitConfig(p), process: p, bootstrapData: data, }, nil } func (c *linuxContainer) newInitConfig(process *Process) *initConfig { cfg := &initConfig{ Config: c.config, Args: process.Args, Env: process.Env, User: process.User, AdditionalGroups: process.AdditionalGroups, Cwd: process.Cwd, Capabilities: process.Capabilities, PassedFilesCount: len(process.ExtraFiles), ContainerId: c.ID(), NoNewPrivileges: c.config.NoNewPrivileges, RootlessEUID: c.config.RootlessEUID, RootlessCgroups: c.config.RootlessCgroups, AppArmorProfile: c.config.AppArmorProfile, ProcessLabel: c.config.ProcessLabel, Rlimits: c.config.Rlimits, } if process.NoNewPrivileges != nil { cfg.NoNewPrivileges = *process.NoNewPrivileges } if process.AppArmorProfile != "" { cfg.AppArmorProfile = process.AppArmorProfile } if process.Label != "" { cfg.ProcessLabel = process.Label } if len(process.Rlimits) > 0 { cfg.Rlimits = process.Rlimits } cfg.CreateConsole = process.ConsoleSocket != nil cfg.ConsoleWidth = process.ConsoleWidth cfg.ConsoleHeight = process.ConsoleHeight return cfg } func (c *linuxContainer) Destroy() error { c.m.Lock() defer c.m.Unlock() return c.state.destroy() } func (c *linuxContainer) Pause() error { c.m.Lock() defer c.m.Unlock() status, err := c.currentStatus() if err != nil { return err } switch status { case Running, Created: if err := c.cgroupManager.Freeze(configs.Frozen); err != nil { return err } return c.state.transition(&pausedState{ c: c, }) } return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning) } func (c *linuxContainer) Resume() error { c.m.Lock() defer c.m.Unlock() status, err := c.currentStatus() if err != nil { return err } if status != Paused { return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused) } if err := c.cgroupManager.Freeze(configs.Thawed); err != nil { return err } return 
c.state.transition(&runningState{ c: c, }) } func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { // XXX(cyphar): This requires cgroups. if c.config.RootlessCgroups { logrus.Warn("getting OOM notifications may fail if you don't have the full access to cgroups") } return notifyOnOOM(c.cgroupManager.GetPaths()) } func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) { // XXX(cyphar): This requires cgroups. if c.config.RootlessCgroups { logrus.Warn("getting memory pressure notifications may fail if you don't have the full access to cgroups") } return notifyMemoryPressure(c.cgroupManager.GetPaths(), level) } var criuFeatures *criurpc.CriuFeatures func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error { var t criurpc.CriuReqType t = criurpc.CriuReqType_FEATURE_CHECK // criu 1.8 => 10800 if err := c.checkCriuVersion(10800); err != nil { // Feature checking was introduced with CRIU 1.8. // Ignore the feature check if an older CRIU version is used // and just act as before. // As all automated PR testing is done using CRIU 1.7 this // code will not be tested by automated PR testing. return nil } // make sure the features we are looking for are really not from // some previous check criuFeatures = nil req := &criurpc.CriuReq{ Type: &t, // Theoretically this should not be necessary but CRIU // segfaults if Opts is empty. // Fixed in CRIU 2.12 Opts: rpcOpts, Features: criuFeat, } err := c.criuSwrk(nil, req, criuOpts, false, nil) if err != nil { logrus.Debugf("%s", err) return fmt.Errorf("CRIU feature check failed") } logrus.Debugf("Feature check says: %s", criuFeatures) missingFeatures := false // The outer if checks if the fields actually exist if (criuFeat.MemTrack != nil) && (criuFeatures.MemTrack != nil) { // The inner if checks if they are set to true if *criuFeat.MemTrack && !*criuFeatures.MemTrack { missingFeatures = true logrus.Debugf("CRIU does not support MemTrack") } } // This needs to be repeated for every new feature check. // Is there a way to put this in a function. Reflection? 
if (criuFeat.LazyPages != nil) && (criuFeatures.LazyPages != nil) { if *criuFeat.LazyPages && !*criuFeatures.LazyPages { missingFeatures = true logrus.Debugf("CRIU does not support LazyPages") } } if missingFeatures { return fmt.Errorf("CRIU is missing features") } return nil } func parseCriuVersion(path string) (int, error) { var x, y, z int out, err := exec.Command(path, "-V").Output() if err != nil { return 0, fmt.Errorf("Unable to execute CRIU command: %s", path) } x = 0 y = 0 z = 0 if ep := strings.Index(string(out), "-"); ep >= 0 { // criu Git version format var version string if sp := strings.Index(string(out), "GitID"); sp > 0 { version = string(out)[sp:ep] } else { return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path) } n, err := fmt.Sscanf(version, "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 if err != nil { n, err = fmt.Sscanf(version, "GitID: v%d.%d", &x, &y) // 1.6 y++ } else { z++ } if n < 2 || err != nil { return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err) } } else { // criu release version format n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 if err != nil { n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 } if n < 2 || err != nil { return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) } } return x*10000 + y*100 + z, nil } func compareCriuVersion(criuVersion int, minVersion int) error { // simple function to perform the actual version compare if criuVersion < minVersion { return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion) } return nil } // This is used to store the result of criu version RPC var criuVersionRPC *criurpc.CriuVersion // checkCriuVersion checks Criu version greater than or equal to minVersion func (c *linuxContainer) checkCriuVersion(minVersion int) error { // If the version of criu has already been determined there is no need // to ask criu for the version again. Use the value from c.criuVersion. if c.criuVersion != 0 { return compareCriuVersion(c.criuVersion, minVersion) } // First try if this version of CRIU support the version RPC. // The CRIU version RPC was introduced with CRIU 3.0. // First, reset the variable for the RPC answer to nil criuVersionRPC = nil var t criurpc.CriuReqType t = criurpc.CriuReqType_VERSION req := &criurpc.CriuReq{ Type: &t, } err := c.criuSwrk(nil, req, nil, false, nil) if err != nil { return fmt.Errorf("CRIU version check failed: %s", err) } if criuVersionRPC != nil { logrus.Debugf("CRIU version: %s", criuVersionRPC) // major and minor are always set c.criuVersion = int(*criuVersionRPC.Major) * 10000 c.criuVersion += int(*criuVersionRPC.Minor) * 100 if criuVersionRPC.Sublevel != nil { c.criuVersion += int(*criuVersionRPC.Sublevel) } if criuVersionRPC.Gitid != nil { // runc's convention is that a CRIU git release is // always the same as increasing the minor by 1 c.criuVersion -= (c.criuVersion % 100) c.criuVersion += 100 } return compareCriuVersion(c.criuVersion, minVersion) } // This is CRIU without the version RPC and therefore // older than 3.0. Parsing the output is required. 
// This can be remove once runc does not work with criu older than 3.0 c.criuVersion, err = parseCriuVersion(c.criuPath) if err != nil { return err } return compareCriuVersion(c.criuVersion, minVersion) } const descriptorsFilename = "descriptors.json" func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) { mountDest := m.Destination if strings.HasPrefix(mountDest, c.config.Rootfs) { mountDest = mountDest[len(c.config.Rootfs):] } extMnt := &criurpc.ExtMountMap{ Key: proto.String(mountDest), Val: proto.String(mountDest), } req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error { for _, path := range c.config.MaskPaths { fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path)) if err != nil { if os.IsNotExist(err) { continue } return err } if fi.IsDir() { continue } extMnt := &criurpc.ExtMountMap{ Key: proto.String(path), Val: proto.String("/dev/null"), } req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } return nil } func waitForCriuLazyServer(r *os.File, status string) error { data := make([]byte, 1) _, err := r.Read(data) if err != nil { return err } fd, err := os.OpenFile(status, os.O_TRUNC|os.O_WRONLY, os.ModeAppend) if err != nil { return err } _, err = fd.Write(data) if err != nil { return err } fd.Close() return nil } func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts) { // CRIU will evaluate a configuration starting with release 3.11. // Settings in the configuration file will overwrite RPC settings. // Look for annotations. The annotation 'org.criu.config' // specifies if CRIU should use a different, container specific // configuration file. _, annotations := utils.Annotations(c.config.Labels) configFile, exists := annotations["org.criu.config"] if exists { // If the annotation 'org.criu.config' exists and is set // to a non-empty string, tell CRIU to use that as a // configuration file. If the file does not exist, CRIU // will just ignore it. if configFile != "" { rpcOpts.ConfigFile = proto.String(configFile) } // If 'org.criu.config' exists and is set to an empty // string, a runc specific CRIU configuration file will // be not set at all. } else { // If the mentioned annotation has not been found, specify // a default CRIU configuration file. rpcOpts.ConfigFile = proto.String("/etc/criu/runc.conf") } } func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { c.m.Lock() defer c.m.Unlock() // Checkpoint is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS(). // (CLI prints a warning) // TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has // support for doing unprivileged dumps, but the setup of // rootless containers might make this complicated. // criu 1.5.2 => 10502 if err := c.checkCriuVersion(10502); err != nil { return err } if criuOpts.ImagesDirectory == "" { return fmt.Errorf("invalid directory to save checkpoint") } // Since a container can be C/R'ed multiple times, // the checkpoint directory may already exist. 
if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { return err } if criuOpts.WorkDirectory == "" { criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") } if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { return err } workDir, err := os.Open(criuOpts.WorkDirectory) if err != nil { return err } defer workDir.Close() imageDir, err := os.Open(criuOpts.ImagesDirectory) if err != nil { return err } defer imageDir.Close() rpcOpts := criurpc.CriuOpts{ ImagesDirFd: proto.Int32(int32(imageDir.Fd())), WorkDirFd: proto.Int32(int32(workDir.Fd())), LogLevel: proto.Int32(4), LogFile: proto.String("dump.log"), Root: proto.String(c.config.Rootfs), ManageCgroups: proto.Bool(true), NotifyScripts: proto.Bool(true), Pid: proto.Int32(int32(c.initProcess.pid())), ShellJob: proto.Bool(criuOpts.ShellJob), LeaveRunning: proto.Bool(criuOpts.LeaveRunning), TcpEstablished: proto.Bool(criuOpts.TcpEstablished), ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), FileLocks: proto.Bool(criuOpts.FileLocks), EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), LazyPages: proto.Bool(criuOpts.LazyPages), } c.handleCriuConfigurationFile(&rpcOpts) // If the container is running in a network namespace and has // a path to the network namespace configured, we will dump // that network namespace as an external namespace and we // will expect that the namespace exists during restore. // This basically means that CRIU will ignore the namespace // and expect to be setup correctly. nsPath := c.config.Namespaces.PathOf(configs.NEWNET) if nsPath != "" { // For this to work we need at least criu 3.11.0 => 31100. // As there was already a successful version check we will // not error out if it fails. runc will just behave as it used // to do and ignore external network namespaces. err := c.checkCriuVersion(31100) if err == nil { // CRIU expects the information about an external namespace // like this: --external net[<inode>]:<key> // This <key> is always 'extRootNetNS'. 
var netns syscall.Stat_t err = syscall.Stat(nsPath, &netns) if err != nil { return err } criuExternal := fmt.Sprintf("net[%d]:extRootNetNS", netns.Ino) rpcOpts.External = append(rpcOpts.External, criuExternal) } } fcg := c.cgroupManager.GetPaths()["freezer"] if fcg != "" { rpcOpts.FreezeCgroup = proto.String(fcg) } // append optional criu opts, e.g., page-server and port if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { rpcOpts.Ps = &criurpc.CriuPageServerInfo{ Address: proto.String(criuOpts.PageServer.Address), Port: proto.Int32(criuOpts.PageServer.Port), } } //pre-dump may need parentImage param to complete iterative migration if criuOpts.ParentImage != "" { rpcOpts.ParentImg = proto.String(criuOpts.ParentImage) rpcOpts.TrackMem = proto.Bool(true) } // append optional manage cgroups mode if criuOpts.ManageCgroupsMode != 0 { // criu 1.7 => 10700 if err := c.checkCriuVersion(10700); err != nil { return err } mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) rpcOpts.ManageCgroupsMode = &mode } var t criurpc.CriuReqType if criuOpts.PreDump { feat := criurpc.CriuFeatures{ MemTrack: proto.Bool(true), } if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { return err } t = criurpc.CriuReqType_PRE_DUMP } else { t = criurpc.CriuReqType_DUMP } req := &criurpc.CriuReq{ Type: &t, Opts: &rpcOpts, } if criuOpts.LazyPages { // lazy migration requested; check if criu supports it feat := criurpc.CriuFeatures{ LazyPages: proto.Bool(true), } if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { return err } statusRead, statusWrite, err := os.Pipe() if err != nil { return err } rpcOpts.StatusFd = proto.Int32(int32(statusWrite.Fd())) go waitForCriuLazyServer(statusRead, criuOpts.StatusFd) } //no need to dump these information in pre-dump if !criuOpts.PreDump { for _, m := range c.config.Mounts { switch m.Device { case "bind": c.addCriuDumpMount(req, m) case "cgroup": binds, err := getCgroupMounts(m) if err != nil { return err } for _, b := range binds { c.addCriuDumpMount(req, b) } } } if err := c.addMaskPaths(req); err != nil { return err } for _, node := range c.config.Devices { m := &configs.Mount{Destination: node.Path, Source: node.Path} c.addCriuDumpMount(req, m) } // Write the FD info to a file in the image directory fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) if err != nil { return err } } err = c.criuSwrk(nil, req, criuOpts, false, nil) if err != nil { return err } return nil } func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) { mountDest := m.Destination if strings.HasPrefix(mountDest, c.config.Rootfs) { mountDest = mountDest[len(c.config.Rootfs):] } extMnt := &criurpc.ExtMountMap{ Key: proto.String(mountDest), Val: proto.String(m.Source), } req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) { for _, iface := range c.config.Networks { switch iface.Type { case "veth": veth := new(criurpc.CriuVethPair) veth.IfOut = proto.String(iface.HostInterfaceName) veth.IfIn = proto.String(iface.Name) req.Opts.Veths = append(req.Opts.Veths, veth) case "loopback": // Do nothing } } for _, i := range criuOpts.VethPairs { veth := new(criurpc.CriuVethPair) veth.IfOut = proto.String(i.HostInterfaceName) veth.IfIn = proto.String(i.ContainerInterfaceName) req.Opts.Veths = append(req.Opts.Veths, 
veth) } } // makeCriuRestoreMountpoints makes the actual mountpoints for the // restore using CRIU. This function is inspired from the code in // rootfs_linux.go func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error { switch m.Device { case "cgroup": // Do nothing for cgroup, CRIU should handle it case "bind": // The prepareBindMount() function checks if source // exists. So it cannot be used for other filesystem types. if err := prepareBindMount(m, c.config.Rootfs); err != nil { return err } default: // for all other file-systems just create the mountpoints dest, err := securejoin.SecureJoin(c.config.Rootfs, m.Destination) if err != nil { return err } if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil { return err } m.Destination = dest if err := os.MkdirAll(dest, 0755); err != nil { return err } } return nil } // isPathInPrefixList is a small function for CRIU restore to make sure // mountpoints, which are on a tmpfs, are not created in the roofs func isPathInPrefixList(path string, prefix []string) bool { for _, p := range prefix { if strings.HasPrefix(path, p+"/") { return false } } return true } // prepareCriuRestoreMounts tries to set up the rootfs of the // container to be restored in the same way runc does it for // initial container creation. Even for a read-only rootfs container // runc modifies the rootfs to add mountpoints which do not exist. // This function also creates missing mountpoints as long as they // are not on top of a tmpfs, as CRIU will restore tmpfs content anyway. func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error { // First get a list of a all tmpfs mounts tmpfs := []string{} for _, m := range mounts { switch m.Device { case "tmpfs": tmpfs = append(tmpfs, m.Destination) } } // Now go through all mounts and create the mountpoints // if the mountpoints are not on a tmpfs, as CRIU will // restore the complete tmpfs content from its checkpoint. for _, m := range mounts { if isPathInPrefixList(m.Destination, tmpfs) { if err := c.makeCriuRestoreMountpoints(m); err != nil { return err } } } return nil } func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { c.m.Lock() defer c.m.Unlock() var extraFiles []*os.File // Restore is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS(). // (CLI prints a warning) // TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have // support for unprivileged restore at the moment. // criu 1.5.2 => 10502 if err := c.checkCriuVersion(10502); err != nil { return err } if criuOpts.WorkDirectory == "" { criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") } // Since a container can be C/R'ed multiple times, // the work directory may already exist. if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { return err } workDir, err := os.Open(criuOpts.WorkDirectory) if err != nil { return err } defer workDir.Close() if criuOpts.ImagesDirectory == "" { return fmt.Errorf("invalid directory to restore checkpoint") } imageDir, err := os.Open(criuOpts.ImagesDirectory) if err != nil { return err } defer imageDir.Close() // CRIU has a few requirements for a root directory: // * it must be a mount point // * its parent must not be overmounted // c.config.Rootfs is bind-mounted to a temporary directory // to satisfy these requirements. 
root := filepath.Join(c.root, "criu-root") if err := os.Mkdir(root, 0755); err != nil { return err } defer os.Remove(root) root, err = filepath.EvalSymlinks(root) if err != nil { return err } err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "") if err != nil { return err } defer unix.Unmount(root, unix.MNT_DETACH) t := criurpc.CriuReqType_RESTORE req := &criurpc.CriuReq{ Type: &t, Opts: &criurpc.CriuOpts{ ImagesDirFd: proto.Int32(int32(imageDir.Fd())), WorkDirFd: proto.Int32(int32(workDir.Fd())), EvasiveDevices: proto.Bool(true), LogLevel: proto.Int32(4), LogFile: proto.String("restore.log"), RstSibling: proto.Bool(true), Root: proto.String(root), ManageCgroups: proto.Bool(true), NotifyScripts: proto.Bool(true), ShellJob: proto.Bool(criuOpts.ShellJob), ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), TcpEstablished: proto.Bool(criuOpts.TcpEstablished), FileLocks: proto.Bool(criuOpts.FileLocks), EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), LazyPages: proto.Bool(criuOpts.LazyPages), }, } c.handleCriuConfigurationFile(req.Opts) // Same as during checkpointing. If the container has a specific network namespace // assigned to it, this now expects that the checkpoint will be restored in a // already created network namespace. nsPath := c.config.Namespaces.PathOf(configs.NEWNET) if nsPath != "" { // For this to work we need at least criu 3.11.0 => 31100. // As there was already a successful version check we will // not error out if it fails. runc will just behave as it used // to do and ignore external network namespaces. err := c.checkCriuVersion(31100) if err == nil { // CRIU wants the information about an existing network namespace // like this: --inherit-fd fd[<fd>]:<key> // The <key> needs to be the same as during checkpointing. // We are always using 'extRootNetNS' as the key in this. netns, err := os.Open(nsPath) defer netns.Close() if err != nil { logrus.Errorf("If a specific network namespace is defined it must exist: %s", err) return fmt.Errorf("Requested network namespace %v does not exist", nsPath) } inheritFd := new(criurpc.InheritFd) inheritFd.Key = proto.String("extRootNetNS") // The offset of four is necessary because 0, 1, 2 and 3 is already // used by stdin, stdout, stderr, 'criu swrk' socket. inheritFd.Fd = proto.Int32(int32(4 + len(extraFiles))) req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) // All open FDs need to be transferred to CRIU via extraFiles extraFiles = append(extraFiles, netns) } } // This will modify the rootfs of the container in the same way runc // modifies the container during initial creation. 
if err := c.prepareCriuRestoreMounts(c.config.Mounts); err != nil { return err } for _, m := range c.config.Mounts { switch m.Device { case "bind": c.addCriuRestoreMount(req, m) case "cgroup": binds, err := getCgroupMounts(m) if err != nil { return err } for _, b := range binds { c.addCriuRestoreMount(req, b) } } } if len(c.config.MaskPaths) > 0 { m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"} c.addCriuRestoreMount(req, m) } for _, node := range c.config.Devices { m := &configs.Mount{Destination: node.Path, Source: node.Path} c.addCriuRestoreMount(req, m) } if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 { c.restoreNetwork(req, criuOpts) } // append optional manage cgroups mode if criuOpts.ManageCgroupsMode != 0 { // criu 1.7 => 10700 if err := c.checkCriuVersion(10700); err != nil { return err } mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) req.Opts.ManageCgroupsMode = &mode } var ( fds []string fdJSON []byte ) if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil { return err } if err := json.Unmarshal(fdJSON, &fds); err != nil { return err } for i := range fds { if s := fds[i]; strings.Contains(s, "pipe:") { inheritFd := new(criurpc.InheritFd) inheritFd.Key = proto.String(s) inheritFd.Fd = proto.Int32(int32(i)) req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) } } return c.criuSwrk(process, req, criuOpts, true, extraFiles) } func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error { // XXX: Do we need to deal with this case? AFAIK criu still requires root. if err := c.cgroupManager.Apply(pid); err != nil { return err } if err := c.cgroupManager.Set(c.config); err != nil { return newSystemError(err) } path := fmt.Sprintf("/proc/%d/cgroup", pid) cgroupsPaths, err := cgroups.ParseCgroupFile(path) if err != nil { return err } for c, p := range cgroupsPaths { cgroupRoot := &criurpc.CgroupRoot{ Ctrl: proto.String(c), Path: proto.String(p), } req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot) } return nil } func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool, extraFiles []*os.File) error { fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0) if err != nil { return err } var logPath string if opts != nil { logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile()) } else { // For the VERSION RPC 'opts' is set to 'nil' and therefore // opts.WorkDirectory does not exist. Set logPath to "". logPath = "" } criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") criuClientFileCon, err := net.FileConn(criuClient) criuClient.Close() if err != nil { return err } criuClientCon := criuClientFileCon.(*net.UnixConn) defer criuClientCon.Close() criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server") defer criuServer.Close() args := []string{"swrk", "3"} if c.criuVersion != 0 { // If the CRIU Version is still '0' then this is probably // the initial CRIU run to detect the version. Skip it. logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath) } logrus.Debugf("Using CRIU with following args: %s", args) cmd := exec.Command(c.criuPath, args...) if process != nil { cmd.Stdin = process.Stdin cmd.Stdout = process.Stdout cmd.Stderr = process.Stderr } cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) if extraFiles != nil { cmd.ExtraFiles = append(cmd.ExtraFiles, extraFiles...) 
} if err := cmd.Start(); err != nil { return err } criuServer.Close() defer func() { criuClientCon.Close() _, err := cmd.Process.Wait() if err != nil { return } }() if applyCgroups { err := c.criuApplyCgroups(cmd.Process.Pid, req) if err != nil { return err } } var extFds []string if process != nil { extFds, err = getPipeFds(cmd.Process.Pid) if err != nil { return err } } logrus.Debugf("Using CRIU in %s mode", req.GetType().String()) // In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts() // should be empty. For older CRIU versions it still will be // available but empty. criurpc.CriuReqType_VERSION actually // has no req.GetOpts(). if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK || req.GetType() == criurpc.CriuReqType_VERSION) { val := reflect.ValueOf(req.GetOpts()) v := reflect.Indirect(val) for i := 0; i < v.NumField(); i++ { st := v.Type() name := st.Field(i).Name if strings.HasPrefix(name, "XXX_") { continue } value := val.MethodByName("Get" + name).Call([]reflect.Value{}) logrus.Debugf("CRIU option %s with value %v", name, value[0]) } } data, err := proto.Marshal(req) if err != nil { return err } _, err = criuClientCon.Write(data) if err != nil { return err } buf := make([]byte, 10*4096) oob := make([]byte, 4096) for true { n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob) if err != nil { return err } if n == 0 { return fmt.Errorf("unexpected EOF") } if n == len(buf) { return fmt.Errorf("buffer is too small") } resp := new(criurpc.CriuResp) err = proto.Unmarshal(buf[:n], resp) if err != nil { return err } if !resp.GetSuccess() { typeString := req.GetType().String() if typeString == "VERSION" { // If the VERSION RPC fails this probably means that the CRIU // version is too old for this RPC. Just return 'nil'. return nil } return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath) } t := resp.GetType() switch { case t == criurpc.CriuReqType_VERSION: logrus.Debugf("CRIU version: %s", resp) criuVersionRPC = resp.GetVersion() break case t == criurpc.CriuReqType_FEATURE_CHECK: logrus.Debugf("Feature check says: %s", resp) criuFeatures = resp.GetFeatures() case t == criurpc.CriuReqType_NOTIFY: if err := c.criuNotifications(resp, process, opts, extFds, oob[:oobn]); err != nil { return err } t = criurpc.CriuReqType_NOTIFY req = &criurpc.CriuReq{ Type: &t, NotifySuccess: proto.Bool(true), } data, err = proto.Marshal(req) if err != nil { return err } _, err = criuClientCon.Write(data) if err != nil { return err } continue case t == criurpc.CriuReqType_RESTORE: case t == criurpc.CriuReqType_DUMP: case t == criurpc.CriuReqType_PRE_DUMP: default: return fmt.Errorf("unable to parse the response %s", resp.String()) } break } criuClientCon.CloseWrite() // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. // Here we want to wait only the CRIU process. st, err := cmd.Process.Wait() if err != nil { return err } // In pre-dump mode CRIU is in a loop and waits for // the final DUMP command. 
// The current runc pre-dump approach, however, is // start criu in PRE_DUMP once for a single pre-dump // and not the whole series of pre-dump, pre-dump, ...m, dump // If we got the message CriuReqType_PRE_DUMP it means // CRIU was successful and we need to forcefully stop CRIU if !st.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP { return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath) } return nil } // block any external network activity func lockNetwork(config *configs.Config) error { for _, config := range config.Networks { strategy, err := getStrategy(config.Type) if err != nil { return err } if err := strategy.detach(config); err != nil { return err } } return nil } func unlockNetwork(config *configs.Config) error { for _, config := range config.Networks { strategy, err := getStrategy(config.Type) if err != nil { return err } if err = strategy.attach(config); err != nil { return err } } return nil } func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string, oob []byte) error { notify := resp.GetNotify() if notify == nil { return fmt.Errorf("invalid response: %s", resp.String()) } logrus.Debugf("notify: %s\n", notify.GetScript()) switch { case notify.GetScript() == "post-dump": f, err := os.Create(filepath.Join(c.root, "checkpoint")) if err != nil { return err } f.Close() case notify.GetScript() == "network-unlock": if err := unlockNetwork(c.config); err != nil { return err } case notify.GetScript() == "network-lock": if err := lockNetwork(c.config); err != nil { return err } case notify.GetScript() == "setup-namespaces": if c.config.Hooks != nil { s, err := c.currentOCIState() if err != nil { return nil } s.Pid = int(notify.GetPid()) for i, hook := range c.config.Hooks.Prestart { if err := hook.Run(s); err != nil { return newSystemErrorWithCausef(err, "running prestart hook %d", i) } } } case notify.GetScript() == "post-restore": pid := notify.GetPid() r, err := newRestoredProcess(int(pid), fds) if err != nil { return err } process.ops = r if err := c.state.transition(&restoredState{ imageDir: opts.ImagesDirectory, c: c, }); err != nil { return err } // create a timestamp indicating when the restored checkpoint was started c.created = time.Now().UTC() if _, err := c.updateState(r); err != nil { return err } if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil { if !os.IsNotExist(err) { logrus.Error(err) } } case notify.GetScript() == "orphan-pts-master": scm, err := unix.ParseSocketControlMessage(oob) if err != nil { return err } fds, err := unix.ParseUnixRights(&scm[0]) if err != nil { return err } master := os.NewFile(uintptr(fds[0]), "orphan-pts-master") defer master.Close() // While we can access console.master, using the API is a good idea. 
if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil { return err } } return nil } func (c *linuxContainer) updateState(process parentProcess) (*State, error) { if process != nil { c.initProcess = process } state, err := c.currentState() if err != nil { return nil, err } err = c.saveState(state) if err != nil { return nil, err } return state, nil } func (c *linuxContainer) saveState(s *State) error { f, err := os.Create(filepath.Join(c.root, stateFilename)) if err != nil { return err } defer f.Close() return utils.WriteJSON(f, s) } func (c *linuxContainer) deleteState() error { return os.Remove(filepath.Join(c.root, stateFilename)) } func (c *linuxContainer) currentStatus() (Status, error) { if err := c.refreshState(); err != nil { return -1, err } return c.state.status(), nil } // refreshState needs to be called to verify that the current state on the // container is what is true. Because consumers of libcontainer can use it // out of process we need to verify the container's status based on runtime // information and not rely on our in process info. func (c *linuxContainer) refreshState() error { paused, err := c.isPaused() if err != nil { return err } if paused { return c.state.transition(&pausedState{c: c}) } t, err := c.runType() if err != nil { return err } switch t { case Created: return c.state.transition(&createdState{c: c}) case Running: return c.state.transition(&runningState{c: c}) } return c.state.transition(&stoppedState{c: c}) } func (c *linuxContainer) runType() (Status, error) { if c.initProcess == nil { return Stopped, nil } pid := c.initProcess.pid() stat, err := system.Stat(pid) if err != nil { return Stopped, nil } if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead { return Stopped, nil } // We'll create exec fifo and blocking on it after container is created, // and delete it after start container. if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil { return Created, nil } return Running, nil } func (c *linuxContainer) isPaused() (bool, error) { fcg := c.cgroupManager.GetPaths()["freezer"] if fcg == "" { // A container doesn't have a freezer cgroup return false, nil } pausedState := "FROZEN" filename := "freezer.state" if cgroups.IsCgroup2UnifiedMode() { filename = "cgroup.freeze" pausedState = "1" } data, err := ioutil.ReadFile(filepath.Join(fcg, filename)) if err != nil { // If freezer cgroup is not mounted, the container would just be not paused. 
if os.IsNotExist(err) { return false, nil } return false, newSystemErrorWithCause(err, "checking if container is paused") } return bytes.Equal(bytes.TrimSpace(data), []byte(pausedState)), nil } func (c *linuxContainer) currentState() (*State, error) { var ( startTime uint64 externalDescriptors []string pid = -1 ) if c.initProcess != nil { pid = c.initProcess.pid() startTime, _ = c.initProcess.startTime() externalDescriptors = c.initProcess.externalDescriptors() } intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID()) if err != nil { intelRdtPath = "" } state := &State{ BaseState: BaseState{ ID: c.ID(), Config: *c.config, InitProcessPid: pid, InitProcessStartTime: startTime, Created: c.created, }, Rootless: c.config.RootlessEUID && c.config.RootlessCgroups, CgroupPaths: c.cgroupManager.GetPaths(), IntelRdtPath: intelRdtPath, NamespacePaths: make(map[configs.NamespaceType]string), ExternalDescriptors: externalDescriptors, } if pid > 0 { for _, ns := range c.config.Namespaces { state.NamespacePaths[ns.Type] = ns.GetPath(pid) } for _, nsType := range configs.NamespaceTypes() { if !configs.IsNamespaceSupported(nsType) { continue } if _, ok := state.NamespacePaths[nsType]; !ok { ns := configs.Namespace{Type: nsType} state.NamespacePaths[ns.Type] = ns.GetPath(pid) } } } return state, nil } func (c *linuxContainer) currentOCIState() (*specs.State, error) { bundle, annotations := utils.Annotations(c.config.Labels) state := &specs.State{ Version: specs.Version, ID: c.ID(), Bundle: bundle, Annotations: annotations, } status, err := c.currentStatus() if err != nil { return nil, err } state.Status = status.String() if status != Stopped { if c.initProcess != nil { state.Pid = c.initProcess.pid() } } return state, nil } // orderNamespacePaths sorts namespace paths into a list of paths that we // can setns in order. func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) { paths := []string{} for _, ns := range configs.NamespaceTypes() { // Remove namespaces that we don't need to join. if !c.config.Namespaces.Contains(ns) { continue } if p, ok := namespaces[ns]; ok && p != "" { // check if the requested namespace is supported if !configs.IsNamespaceSupported(ns) { return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns)) } // only set to join this namespace if it exists if _, err := os.Lstat(p); err != nil { return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p) } // do not allow namespace path with comma as we use it to separate // the namespace paths if strings.ContainsRune(p, ',') { return nil, newSystemError(fmt.Errorf("invalid path %s", p)) } paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p)) } } return paths, nil } func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) { data := bytes.NewBuffer(nil) for _, im := range idMap { line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size) if _, err := data.WriteString(line); err != nil { return nil, err } } return data.Bytes(), nil } // bootstrapData encodes the necessary data in netlink binary format // as a io.Reader. // Consumer can write the data to a bootstrap program // such as one that uses nsenter package to bootstrap the container's // init process correctly, i.e. with correct namespaces, uid/gid // mapping etc. 
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) { // create the netlink message r := nl.NewNetlinkRequest(int(InitMsg), 0) // write cloneFlags r.AddData(&Int32msg{ Type: CloneFlagsAttr, Value: uint32(cloneFlags), }) // write custom namespace paths if len(nsMaps) > 0 { nsPaths, err := c.orderNamespacePaths(nsMaps) if err != nil { return nil, err } r.AddData(&Bytemsg{ Type: NsPathsAttr, Value: []byte(strings.Join(nsPaths, ",")), }) } // write namespace paths only when we are not joining an existing user ns _, joinExistingUser := nsMaps[configs.NEWUSER] if !joinExistingUser { // write uid mappings if len(c.config.UidMappings) > 0 { if c.config.RootlessEUID && c.newuidmapPath != "" { r.AddData(&Bytemsg{ Type: UidmapPathAttr, Value: []byte(c.newuidmapPath), }) } b, err := encodeIDMapping(c.config.UidMappings) if err != nil { return nil, err } r.AddData(&Bytemsg{ Type: UidmapAttr, Value: b, }) } // write gid mappings if len(c.config.GidMappings) > 0 { b, err := encodeIDMapping(c.config.GidMappings) if err != nil { return nil, err } r.AddData(&Bytemsg{ Type: GidmapAttr, Value: b, }) if c.config.RootlessEUID && c.newgidmapPath != "" { r.AddData(&Bytemsg{ Type: GidmapPathAttr, Value: []byte(c.newgidmapPath), }) } if requiresRootOrMappingTool(c.config) { r.AddData(&Boolmsg{ Type: SetgroupAttr, Value: true, }) } } } if c.config.OomScoreAdj != nil { // write oom_score_adj r.AddData(&Bytemsg{ Type: OomScoreAdjAttr, Value: []byte(fmt.Sprintf("%d", *c.config.OomScoreAdj)), }) } // write rootless r.AddData(&Boolmsg{ Type: RootlessEUIDAttr, Value: c.config.RootlessEUID, }) return bytes.NewReader(r.Serialize()), nil } // ignoreTerminateErrors returns nil if the given err matches an error known // to indicate that the terminate occurred successfully or err was nil, otherwise // err is returned unaltered. func ignoreTerminateErrors(err error) error { if err == nil { return nil } s := err.Error() switch { case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"): return nil } return err } func requiresRootOrMappingTool(c *configs.Config) bool { gidMap := []configs.IDMap{ {ContainerID: 0, HostID: os.Getegid(), Size: 1}, } return !reflect.DeepEqual(c.GidMappings, gidMap) }
1
18,069
should we do the same for os.Mkdir(criuOpts.WorkDirectory, 0755)?
opencontainers-runc
go
@@ -87,7 +87,8 @@ module.exports.rebaseBraveStringFilesOnChromiumL10nFiles = (path) => .replace('<include name="IDR_MD_HISTORY_SIDE_BAR_HTML"', '<include name="IDR_MD_HISTORY_SIDE_BAR_HTML" flattenhtml="true"') .replace(pageVisibility, bravePageVisibility + pageVisibility) .replace(/settings_chromium_strings.grdp/g, 'settings_brave_strings.grdp') - .replace(/Automatically sends usage statistics and crash reports to Brave/g, 'Automatically sends crash reports to Brave') + .replace(/Automatically send usage statistics and crash reports to Google/g, 'Automatically send crash reports to Google') + .replace(/Automatically sends usage statistics and crash reports to Google/g, 'Automatically sends crash reports to Google') .replace(/The Chromium Authors/g, 'Brave Software Inc') .replace(/Google Chrome/g, 'Brave') .replace(/Chromium/g, 'Brave')
1
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ const path = require('path') const fs = require('fs') const srcDir = path.resolve(path.join(__dirname, '..', 'src')) // Brave string paths const braveStringsPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_strings.grd')) const braveSettingsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_brave_strings.grdp')) const braveComponentsStringsPath = path.resolve(path.join(srcDir, 'brave', 'app', 'components_brave_strings.grd')) const braveExtensionMessagesPath = path.resolve(path.join(srcDir, 'brave', 'vendor', 'brave-extension', 'app', '_locales', 'en_US', 'messages.json')) const braveSpecificGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_generated_resources.grd')) const braveComponentsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'components', 'resources', 'brave_components_resources.grd')) const braveGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'generated_resources.grd')) const braveBookmarksPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'bookmarks_strings.grdp')) const braveMediaRouterPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'media_router_strings.grdp')) const braveSettingsStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_strings.grdp')) const braveMdExtensionsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'md_extensions_strings.grdp')) const bravePrintingStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'printing_strings.grdp')) const braveExtensionsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'md_extensions', 'extensions_resources.grd')) const braveSettingsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'settings', 'settings_resources.grd')) const braveBrowserResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'browser_resources.grd')) // Chromium string paths const chromiumStringsPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'chromium_strings.grd')) const chroimumSettingsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_chromium_strings.grdp')) const chromiumComponentsStringsPath = path.resolve(path.join(srcDir, 'components', 'components_chromium_strings.grd')) const chromiumGeneratedResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'generated_resources.grd')) const chromiumBookmarksPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'bookmarks_strings.grdp')) const chromiumMediaRouterPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'media_router_strings.grdp')) const chromiumSettingsStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_strings.grdp')) const chromiumMdExtensionsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'md_extensions_strings.grdp')) const chromePrintingStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'printing_strings.grdp')) const chromiumExtensionsResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 'resources', 'md_extensions', 'extensions_resources.grd')) const chromiumSettingsResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 'resources', 'settings', 'settings_resources.grd')) const chromiumBrowserResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 
'browser_resources.grd')) const autoGeneratedWarning = '<!-- This file is created by l10nUtil.js. Do not edit manually. -->' const pageVisibility = ' <structure name="IDR_SETTINGS_PAGE_VISIBILITY_JS"\n' const bravePageVisibility = ' <structure name="IDR_SETTINGS_BRAVE_PAGE_VISIBILITY_JS"\n' + ' file="brave_page_visibility.js"\n' + ' type="chrome_html" />\n' module.exports.getSourceStringPaths = () => { return [ braveStringsPath, braveComponentsStringsPath, braveExtensionMessagesPath, braveSpecificGeneratedResourcesPath, braveComponentsResourcesPath, braveGeneratedResourcesPath, // No strings for now, uncomment if strings are added // path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'brave_extension.grd')), // path.resolve(path.join(srcDir, 'brave', 'common', 'extensions', 'api', 'brave_api_resources.grd')), ] } module.exports.rebaseBraveStringFilesOnChromiumL10nFiles = (path) => Object.entries({ [chromiumStringsPath]: braveStringsPath, [chroimumSettingsPartPath]: braveSettingsPartPath, [chromiumComponentsStringsPath]: braveComponentsStringsPath, [chromiumGeneratedResourcesPath]: braveGeneratedResourcesPath, [chromiumBookmarksPartPath]: braveBookmarksPartPath, [chromiumMediaRouterPartPath]: braveMediaRouterPartPath, [chromiumSettingsStringsPartPath]: braveSettingsStringsPartPath, [chromiumMdExtensionsPartPath]: braveMdExtensionsPartPath, [chromePrintingStringsPartPath]: bravePrintingStringsPartPath, [chromiumExtensionsResourcesPath]: braveExtensionsResourcesPath, [chromiumSettingsResourcesPath]: braveSettingsResourcesPath, [chromiumBrowserResourcesPath]: braveBrowserResourcesPath }).forEach(([sourcePath, destPath]) => fs.writeFileSync(destPath, fs.readFileSync(sourcePath, 'utf8') .replace(/<\?xml version="1.0" encoding="utf-8"\?>/i, '<?xml version="1.0" encoding="utf-8"?>\n' + autoGeneratedWarning) .replace('<structure name="IDR_MD_EXTENSIONS_SIDEBAR_HTML"', '<structure name="IDR_MD_EXTENSIONS_SIDEBAR_HTML" preprocess="true"') .replace('<structure name="IDR_SETTINGS_APPEARANCE_FONTS_PAGE_HTML"', '<structure name="IDR_SETTINGS_APPEARANCE_FONTS_PAGE_HTML" preprocess="true"') .replace('<structure name="IDR_SETTINGS_PASSWORDS_SECTION_HTML"', '<structure name="IDR_SETTINGS_PASSWORDS_SECTION_HTML" preprocess="true"') .replace('<include name="IDR_MD_HISTORY_SIDE_BAR_HTML"', '<include name="IDR_MD_HISTORY_SIDE_BAR_HTML" flattenhtml="true"') .replace(pageVisibility, bravePageVisibility + pageVisibility) .replace(/settings_chromium_strings.grdp/g, 'settings_brave_strings.grdp') .replace(/Automatically sends usage statistics and crash reports to Brave/g, 'Automatically sends crash reports to Brave') .replace(/The Chromium Authors/g, 'Brave Software Inc') .replace(/Google Chrome/g, 'Brave') .replace(/Chromium/g, 'Brave') .replace(/Chrome/g, 'Brave') .replace(/Google/g, 'Brave'), 'utf8'))
1
5,362
@bbondy Two messages because the label and description differ very slightly in their wording...
brave-brave-browser
js
@@ -134,7 +134,10 @@ unit_test_get_ymm_caller_saved() register __m256 ymm15 asm("ymm15"); # endif - for (int regno = 0; regno < proc_num_simd_registers(); ++regno) { + /* The function get_ymm_caller_saved is intended to be used for AVX (no AVX-512). It + * currently doesn't cover extended AVX-512 registers. + */ + for (int regno = 0; regno < proc_num_simd_sse_avx_registers(); ++regno) { for (int dword = 0; dword < sizeof(dr_ymm_t) / sizeof(uint); ++dword) { get_buffer[regno].u32[dword] = 0; ref_buffer[regno].u32[dword] = base++;
1
/* ********************************************************** * Copyright (c) 2013-2019 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* Copyright (c) 2019 Google, Inc. All rights reserved. */ /* * x86_code_test.c - unit tests for auxiliary asm and some C routines */ #include "../globals.h" #include "../fragment.h" #include "../dispatch.h" #include "../monitor.h" #include "arch.h" #if defined(UNIX) && defined(X86) # include <immintrin.h> #endif #if defined(STANDALONE_UNIT_TEST) # define CONST_BYTE 0x1f # define TEST_STACK_SIZE 4096 /* Align stack to 16 bytes: sufficient for all current architectures. */ byte ALIGN_VAR(16) test_stack[TEST_STACK_SIZE]; static dcontext_t *static_dc; static void check_var(byte *var) { EXPECT(*var, CONST_BYTE); } static void (*check_var_ptr)(byte *) = check_var; static void test_func(dcontext_t *dcontext) { /* i#1577: we want to read the stack without bothering with a separate * assembly routine and without getting an uninit var warning from the * compiler. We go through a separate function and avoid compiler analysis * of that function via an indirect call. 
*/ byte var; check_var_ptr(&var); EXPECT((ptr_uint_t)dcontext, (ptr_uint_t)static_dc); return; } static void test_call_switch_stack(dcontext_t *dc) { byte *stack_ptr = test_stack + TEST_STACK_SIZE; static_dc = dc; print_file(STDERR, "testing asm call_switch_stack\n"); memset(test_stack, CONST_BYTE, sizeof(test_stack)); call_switch_stack(dc, stack_ptr, (void (*)(void *))test_func, NULL, true /* should return */); } static void test_cpuid() { # ifdef X86 int cpuid_res[4] = { 0 }; # endif print_file(STDERR, "testing asm cpuid\n"); EXPECT(cpuid_supported(), IF_X86_ELSE(true, false)); # ifdef X86 our_cpuid(cpuid_res, 0, 0); /* get vendor id */ /* cpuid_res[1..3] stores vendor info like "GenuineIntel" or "AuthenticAMD" for X86 */ EXPECT_NE(cpuid_res[1], 0); EXPECT_NE(cpuid_res[2], 0); EXPECT_NE(cpuid_res[3], 0); # endif } # ifdef __AVX__ static void unit_test_get_ymm_caller_saved() { dr_zmm_t ref_buffer[MCXT_NUM_SIMD_SLOTS]; dr_zmm_t get_buffer[MCXT_NUM_SIMD_SLOTS]; ASSERT(sizeof(dr_zmm_t) == ZMM_REG_SIZE); uint base = 0x78abcdef; register __m256 ymm0 asm("ymm0"); register __m256 ymm1 asm("ymm1"); register __m256 ymm2 asm("ymm2"); register __m256 ymm3 asm("ymm3"); register __m256 ymm4 asm("ymm4"); register __m256 ymm5 asm("ymm5"); register __m256 ymm6 asm("ymm6"); register __m256 ymm7 asm("ymm7"); # ifdef X64 register __m256 ymm8 asm("ymm8"); register __m256 ymm9 asm("ymm9"); register __m256 ymm10 asm("ymm10"); register __m256 ymm11 asm("ymm11"); register __m256 ymm12 asm("ymm12"); register __m256 ymm13 asm("ymm13"); register __m256 ymm14 asm("ymm14"); register __m256 ymm15 asm("ymm15"); # endif for (int regno = 0; regno < proc_num_simd_registers(); ++regno) { for (int dword = 0; dword < sizeof(dr_ymm_t) / sizeof(uint); ++dword) { get_buffer[regno].u32[dword] = 0; ref_buffer[regno].u32[dword] = base++; } memset(&get_buffer[regno].u32[sizeof(dr_ymm_t) / sizeof(uint)], 0, sizeof(dr_zmm_t) - sizeof(dr_ymm_t)); memset(&ref_buffer[regno].u32[sizeof(dr_ymm_t) / sizeof(uint)], 0, sizeof(dr_zmm_t) - sizeof(dr_ymm_t)); } # define MAKE_YMM_REG(num) ymm##num # define MOVE_TO_YMM(buf, num) \ asm volatile("vmovdqu %1, %0" : "=v"(MAKE_YMM_REG(num)) : "m"(buf[num]) :); MOVE_TO_YMM(ref_buffer, 0) MOVE_TO_YMM(ref_buffer, 1) MOVE_TO_YMM(ref_buffer, 2) MOVE_TO_YMM(ref_buffer, 3) MOVE_TO_YMM(ref_buffer, 4) MOVE_TO_YMM(ref_buffer, 5) MOVE_TO_YMM(ref_buffer, 6) MOVE_TO_YMM(ref_buffer, 7) # ifdef X64 MOVE_TO_YMM(ref_buffer, 8) MOVE_TO_YMM(ref_buffer, 9) MOVE_TO_YMM(ref_buffer, 10) MOVE_TO_YMM(ref_buffer, 11) MOVE_TO_YMM(ref_buffer, 12) MOVE_TO_YMM(ref_buffer, 13) MOVE_TO_YMM(ref_buffer, 14) MOVE_TO_YMM(ref_buffer, 15) # endif get_ymm_caller_saved(get_buffer); /* Even though it was experimentally determined that it is not needed, this barrier * prevents the compiler from moving SSE code before the call above. 
*/ asm volatile("" ::: "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"); # ifdef X64 asm volatile("" :: : "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"); # endif for (int regno = 0; regno < proc_num_simd_registers(); ++regno) { print_file(STDERR, "YMM%d ref\n:", regno); dump_buffer_as_bytes(STDERR, &ref_buffer[regno], sizeof(ref_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\nYMM%d get\n:", regno); dump_buffer_as_bytes(STDERR, &get_buffer[regno], sizeof(get_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\n"); } EXPECT( memcmp(ref_buffer, get_buffer, proc_num_simd_registers() * MCXT_SIMD_SLOT_SIZE), 0); } # endif # ifdef __AVX512F__ static void unit_test_get_zmm_caller_saved() { /* XXX i#1312: get_zmm_caller_saved(byte* buf) assumes that there is enough * space in the buffer it's being passed. MCXT_NUM_SIMD_SLOTS does not yet * reflect this. Once this happens, the array size should become * MCXT_NUM_SIMD_SLOTS. */ if (MCXT_NUM_SIMD_SLOTS == 32) { /* This is a just reminder.*/ FAIL(); } dr_zmm_t ref_buffer[32]; dr_zmm_t get_buffer[32]; ASSERT(sizeof(dr_zmm_t) == ZMM_REG_SIZE); uint base = 0x78abcdef; register __m512 zmm0 asm("zmm0"); register __m512 zmm1 asm("zmm1"); register __m512 zmm2 asm("zmm2"); register __m512 zmm3 asm("zmm3"); register __m512 zmm4 asm("zmm4"); register __m512 zmm5 asm("zmm5"); register __m512 zmm6 asm("zmm6"); register __m512 zmm7 asm("zmm7"); # ifdef X64 register __m512 zmm8 asm("zmm8"); register __m512 zmm9 asm("zmm9"); register __m512 zmm10 asm("zmm10"); register __m512 zmm11 asm("zmm11"); register __m512 zmm12 asm("zmm12"); register __m512 zmm13 asm("zmm13"); register __m512 zmm14 asm("zmm14"); register __m512 zmm15 asm("zmm15"); register __m512 zmm16 asm("zmm16"); register __m512 zmm17 asm("zmm17"); register __m512 zmm18 asm("zmm18"); register __m512 zmm19 asm("zmm19"); register __m512 zmm20 asm("zmm20"); register __m512 zmm21 asm("zmm21"); register __m512 zmm22 asm("zmm22"); register __m512 zmm23 asm("zmm23"); register __m512 zmm24 asm("zmm24"); register __m512 zmm25 asm("zmm25"); register __m512 zmm26 asm("zmm26"); register __m512 zmm27 asm("zmm27"); register __m512 zmm28 asm("zmm28"); register __m512 zmm29 asm("zmm29"); register __m512 zmm30 asm("zmm30"); register __m512 zmm31 asm("zmm31"); # endif for (int regno = 0; regno < proc_num_simd_registers(); ++regno) { for (int dword = 0; dword < sizeof(dr_zmm_t) / sizeof(uint); ++dword) { get_buffer[regno].u32[dword] = 0; ref_buffer[regno].u32[dword] = base++; } } # define MAKE_ZMM_REG(num) zmm##num # define MOVE_TO_ZMM(buf, num) \ asm volatile("vmovdqu32 %1, %0" : "=v"(MAKE_ZMM_REG(num)) : "m"(buf[num]) :); MOVE_TO_ZMM(ref_buffer, 0) MOVE_TO_ZMM(ref_buffer, 1) MOVE_TO_ZMM(ref_buffer, 2) MOVE_TO_ZMM(ref_buffer, 3) MOVE_TO_ZMM(ref_buffer, 4) MOVE_TO_ZMM(ref_buffer, 5) MOVE_TO_ZMM(ref_buffer, 6) MOVE_TO_ZMM(ref_buffer, 7) # ifdef X64 MOVE_TO_ZMM(ref_buffer, 8) MOVE_TO_ZMM(ref_buffer, 9) MOVE_TO_ZMM(ref_buffer, 10) MOVE_TO_ZMM(ref_buffer, 11) MOVE_TO_ZMM(ref_buffer, 12) MOVE_TO_ZMM(ref_buffer, 13) MOVE_TO_ZMM(ref_buffer, 14) MOVE_TO_ZMM(ref_buffer, 15) MOVE_TO_ZMM(ref_buffer, 16) MOVE_TO_ZMM(ref_buffer, 17) MOVE_TO_ZMM(ref_buffer, 18) MOVE_TO_ZMM(ref_buffer, 19) MOVE_TO_ZMM(ref_buffer, 20) MOVE_TO_ZMM(ref_buffer, 21) MOVE_TO_ZMM(ref_buffer, 22) MOVE_TO_ZMM(ref_buffer, 23) MOVE_TO_ZMM(ref_buffer, 24) MOVE_TO_ZMM(ref_buffer, 25) MOVE_TO_ZMM(ref_buffer, 26) MOVE_TO_ZMM(ref_buffer, 27) MOVE_TO_ZMM(ref_buffer, 28) MOVE_TO_ZMM(ref_buffer, 29) 
MOVE_TO_ZMM(ref_buffer, 30) MOVE_TO_ZMM(ref_buffer, 31) # endif get_zmm_caller_saved(get_buffer); /* Even though it was experimentally determined that it is not needed, this barrier * prevents the compiler from moving SSE code before the call above. */ asm volatile("" ::: "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"); # ifdef X64 asm volatile("" :: : "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23", "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"); # endif for (int regno = 0; regno < proc_num_simd_registers(); ++regno) { print_file(STDERR, "ZMM%d ref\n:", regno); dump_buffer_as_bytes(STDERR, &ref_buffer[regno], sizeof(ref_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\nZMM%d get\n:", regno); dump_buffer_as_bytes(STDERR, &get_buffer[regno], sizeof(get_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\n"); } EXPECT( memcmp(ref_buffer, get_buffer, proc_num_simd_registers() * MCXT_SIMD_SLOT_SIZE), 0); } static void unit_test_get_opmask_caller_saved() { /* While DynamoRIO's dr_opmask_t type is 8 bytes, the actual machine register is * really only 8 bytes if the processor and OS support AVX512BW. Otherwise it is * 2 Bytes. */ dr_opmask_t ref_buffer[MCXT_NUM_OPMASK_SLOTS]; dr_opmask_t get_buffer[MCXT_NUM_OPMASK_SLOTS]; ASSERT(sizeof(dr_opmask_t) == OPMASK_REG_SIZE); uint base = 0x0000348e; # ifdef __AVX512BW__ /* i#1312: Modern AVX-512 machines support AVX512BW which extends the OpMask registers * to 8 bytes. The right compile flags must then to be used to compile this test, and * the type will be __mmask64. Also DynamoRIO's get_opmask_caller_saved has to * dynamically switch dependent on a proc_ flag indicating AVX512BW is enabled. */ # error "Unimplemented. Should test using __mmask64 instructions." # else ASSERT(MCXT_NUM_OPMASK_SLOTS == 8); register __mmask16 k0 asm("k0"); register __mmask16 k1 asm("k1"); register __mmask16 k2 asm("k2"); register __mmask16 k3 asm("k3"); register __mmask16 k4 asm("k4"); register __mmask16 k5 asm("k5"); register __mmask16 k6 asm("k6"); register __mmask16 k7 asm("k7"); # endif for (int regno = 0; regno < MCXT_NUM_OPMASK_SLOTS; ++regno) { get_buffer[regno] = 0; ref_buffer[regno] = base++; } # define MAKE_OPMASK_REG(num) k##num # define MOVE_TO_OPMASK(buf, num) \ asm volatile("kmovw %1, %0" : "=k"(MAKE_OPMASK_REG(num)) : "m"(buf[num]) :); MOVE_TO_OPMASK(ref_buffer, 0) MOVE_TO_OPMASK(ref_buffer, 1) MOVE_TO_OPMASK(ref_buffer, 2) MOVE_TO_OPMASK(ref_buffer, 3) MOVE_TO_OPMASK(ref_buffer, 4) MOVE_TO_OPMASK(ref_buffer, 5) MOVE_TO_OPMASK(ref_buffer, 6) MOVE_TO_OPMASK(ref_buffer, 7) get_opmask_caller_saved(get_buffer); /* Barrier, as described in unit_test_get_zmm_caller_saved. 
*/ asm volatile("" ::: "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7"); for (int regno = 0; regno < MCXT_NUM_OPMASK_SLOTS; ++regno) { print_file(STDERR, "K%d ref\n:", regno); dump_buffer_as_bytes(STDERR, &ref_buffer[regno], sizeof(ref_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\nK%d get\n:", regno); dump_buffer_as_bytes(STDERR, &get_buffer[regno], sizeof(get_buffer[regno]), DUMP_RAW | DUMP_DWORD); print_file(STDERR, "\n"); } EXPECT(memcmp(ref_buffer, get_buffer, MCXT_NUM_OPMASK_SLOTS * sizeof(dr_opmask_t)), 0); } # endif void unit_test_asm(dcontext_t *dc) { print_file(STDERR, "testing asm\n"); test_call_switch_stack(dc); test_cpuid(); # ifdef UNIX # ifdef __AVX__ unit_test_get_ymm_caller_saved(); # endif # ifdef __AVX512F__ unit_test_get_zmm_caller_saved(); unit_test_get_opmask_caller_saved(); # endif # endif } #endif /* STANDALONE_UNIT_TEST */
1
17,731
"currently" implies it should and will be changed: so TODO i#?
DynamoRIO-dynamorio
c
@@ -1,8 +1,14 @@ import { translationMacro as t } from 'ember-i18n'; import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route'; +import Ember from 'ember'; + +const { computed } = Ember; + export default AbstractIndexRoute.extend({ modelName: 'imaging', - pageTitle: t('imaging.pageTitle'), + pageTitle: computed('i18n', () => { + return t('imaging.pageTitle'); + }), searchStatus: 'Requested', _getStartKeyFromItem(item) {
1
import { translationMacro as t } from 'ember-i18n';
import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route';

export default AbstractIndexRoute.extend({
  modelName: 'imaging',
  pageTitle: t('imaging.pageTitle'),
  searchStatus: 'Requested',
  _getStartKeyFromItem(item) {
    let imagingDateAsTime = item.get('imagingDateAsTime');
    let id = this._getPouchIdFromItem(item);
    let requestedDateAsTime = item.get('requestedDateAsTime');
    let searchStatus = this.get('searchStatus');
    return [searchStatus, requestedDateAsTime, imagingDateAsTime, id];
  },
  _modelQueryParams() {
    let maxId = this._getMaxPouchId();
    let maxValue = this.get('maxValue');
    let minId = this._getMinPouchId();
    let searchStatus = this.get('searchStatus');
    return {
      options: {
        startkey: [searchStatus, null, null, minId],
        endkey: [searchStatus, maxValue, maxValue, maxId]
      },
      mapReduce: 'imaging_by_status'
    };
  }
});
1
13,696
This should be `computed('i18n.locale'....`
HospitalRun-hospitalrun-frontend
js
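The reviewer's point above is that the page title should recompute when the locale changes, which means the computed property needs to depend on the locale rather than on the service object itself. A minimal sketch of that suggestion follows; the dependent key 'i18n.locale', the service injection, and the lookup via the i18n service are assumptions inferred from the truncated comment, not the repository's merged code.

import Ember from 'ember';
import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route';

const { computed, inject } = Ember;

export default AbstractIndexRoute.extend({
  // Assumed injection so the route can read the current locale and translate keys.
  i18n: inject.service(),
  modelName: 'imaging',
  // Depending on 'i18n.locale' makes the title re-evaluate when the language switches;
  // depending on 'i18n' alone never invalidates, because the service reference itself never changes.
  pageTitle: computed('i18n.locale', function() {
    return this.get('i18n').t('imaging.pageTitle');
  }),
  searchStatus: 'Requested'
});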
@@ -12,7 +12,6 @@ class CommitFlag::BackToLifeTest < ActiveSupport::TestCase btl = CommitFlag::BackToLife.find(cf.id) btl.time_elapsed = 123.0 btl.save! - btl.reload btl.time_elapsed.must_equal 123.0 end end
1
require 'test_helper'

class CommitFlag::BackToLifeTest < ActiveSupport::TestCase
  it '#time_elapsed' do
    cf = create(:commit_flag, type: 'CommitFlag::BackToLife', data: { time_elapsed: 789.0 })
    btl = CommitFlag::BackToLife.find(cf.id)
    btl.time_elapsed.must_equal 789.0
  end

  it '#time_elapsed=' do
    cf = create(:commit_flag, type: 'CommitFlag::BackToLife')
    btl = CommitFlag::BackToLife.find(cf.id)
    btl.time_elapsed = 123.0
    btl.save!
    btl.reload
    btl.time_elapsed.must_equal 123.0
  end
end
1
9,018
Wait, does this not work if we do `btl.reload`? It seems odd that we would have to keep the object in the same state.
blackducksoftware-ohloh-ui
rb
@@ -22,9 +22,7 @@ define(['playbackManager', 'focusManager', 'appRouter', 'dom'], function (playba var eventListenerCount = 0; function on(scope, fn) { - if (eventListenerCount) { - eventListenerCount++; - } + eventListenerCount++; dom.addEventListener(scope, 'command', fn, {}); }
1
define(['playbackManager', 'focusManager', 'appRouter', 'dom'], function (playbackManager, focusManager, appRouter, dom) { 'use strict'; var lastInputTime = new Date().getTime(); function notify() { lastInputTime = new Date().getTime(); handleCommand('unknown'); } function notifyMouseMove() { lastInputTime = new Date().getTime(); } function idleTime() { return new Date().getTime() - lastInputTime; } function select(sourceElement) { sourceElement.click(); } var eventListenerCount = 0; function on(scope, fn) { if (eventListenerCount) { eventListenerCount++; } dom.addEventListener(scope, 'command', fn, {}); } function off(scope, fn) { if (eventListenerCount) { eventListenerCount--; } dom.removeEventListener(scope, 'command', fn, {}); } var commandTimes = {}; function checkCommandTime(command) { var last = commandTimes[command] || 0; var now = new Date().getTime(); if ((now - last) < 1000) { return false; } commandTimes[command] = now; return true; } function handleCommand(name, options) { lastInputTime = new Date().getTime(); var sourceElement = (options ? options.sourceElement : null); if (sourceElement) { sourceElement = focusManager.focusableParent(sourceElement); } sourceElement = sourceElement || document.activeElement || window; if (eventListenerCount) { var customEvent = new CustomEvent("command", { detail: { command: name }, bubbles: true, cancelable: true }); var eventResult = sourceElement.dispatchEvent(customEvent); if (!eventResult) { // event cancelled return; } } switch (name) { case 'up': focusManager.moveUp(sourceElement); break; case 'down': focusManager.moveDown(sourceElement); break; case 'left': focusManager.moveLeft(sourceElement); break; case 'right': focusManager.moveRight(sourceElement); break; case 'home': appRouter.goHome(); break; case 'settings': appRouter.showSettings(); break; case 'back': appRouter.back(); break; case 'forward': break; case 'select': select(sourceElement); break; case 'pageup': break; case 'pagedown': break; case 'end': break; case 'menu': break; case 'info': break; case 'nextchapter': playbackManager.nextChapter(); break; case 'next': case 'nexttrack': playbackManager.nextTrack(); break; case 'previous': case 'previoustrack': playbackManager.previousTrack(); break; case 'previouschapter': playbackManager.previousChapter(); break; case 'guide': appRouter.showGuide(); break; case 'recordedtv': appRouter.showRecordedTV(); break; case 'record': break; case 'livetv': appRouter.showLiveTV(); break; case 'mute': playbackManager.setMute(true); break; case 'unmute': playbackManager.setMute(false); break; case 'togglemute': playbackManager.toggleMute(); break; case 'channelup': playbackManager.channelUp(); break; case 'channeldown': playbackManager.channelDown(); break; case 'volumedown': playbackManager.volumeDown(); break; case 'volumeup': playbackManager.volumeUp(); break; case 'play': playbackManager.unpause(); break; case 'pause': playbackManager.pause(); break; case 'playpause': playbackManager.playPause(); break; case 'stop': if (checkCommandTime('stop')) { playbackManager.stop(); } break; case 'changezoom': playbackManager.toggleAspectRatio(); break; case 'changeaudiotrack': playbackManager.changeAudioStream(); break; case 'changesubtitletrack': playbackManager.changeSubtitleStream(); break; case 'search': appRouter.showSearch(); break; case 'favorites': appRouter.showFavorites(); break; case 'fastforward': playbackManager.fastForward(); break; case 'rewind': playbackManager.rewind(); break; case 'togglefullscreen': 
playbackManager.toggleFullscreen(); break; case 'disabledisplaymirror': playbackManager.enableDisplayMirroring(false); break; case 'enabledisplaymirror': playbackManager.enableDisplayMirroring(true); break; case 'toggledisplaymirror': playbackManager.toggleDisplayMirroring(); break; case 'nowplaying': appRouter.showNowPlaying(); break; case 'repeatnone': playbackManager.setRepeatMode('RepeatNone'); break; case 'repeatall': playbackManager.setRepeatMode('RepeatAll'); break; case 'repeatone': playbackManager.setRepeatMode('RepeatOne'); break; default: break; } } dom.addEventListener(document, 'click', notify, { passive: true }); return { trigger: handleCommand, handle: handleCommand, notify: notify, notifyMouseMove: notifyMouseMove, idleTime: idleTime, on: on, off: off }; });
1
12,683
I seem to recall this being the cause of a bug at some point.
jellyfin-jellyfin-web
js
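For context on the guard removed in the patch above: because `eventListenerCount` starts at zero, the guarded increment can never raise it above zero, so `handleCommand` would skip dispatching the 'command' CustomEvent even after listeners had been registered through `on`. A stripped-down sketch (hypothetical names, not from the repository) of the difference:

let guardedCount = 0;
function guardedOn() {
  // original form: the first registration is never counted, so the count stays stuck at 0
  if (guardedCount) {
    guardedCount++;
  }
}
guardedOn();
guardedOn();
console.log(guardedCount); // 0

let fixedCount = 0;
function fixedOn() {
  // patched form: every registration is counted
  fixedCount++;
}
fixedOn();
fixedOn();
console.log(fixedCount); // 2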
@@ -1648,6 +1648,7 @@ public class LFMainActivity extends SharedMediaActivity { menu.setGroupVisible(R.id.photos_option_men, false); menu.findItem(R.id.all_photos).setVisible(!editMode && !hidden); menu.findItem(R.id.search_action).setVisible(!editMode); + menu.findItem(R.id.settings).setVisible(false); if (getAlbums().getSelectedCount() >= 1) { if (getAlbums().getSelectedCount() > 1) {
1
package org.fossasia.phimpme.gallery.activities; import android.animation.Animator; import android.annotation.TargetApi; import android.content.ContentResolver; import android.content.ContentUris; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.res.ColorStateList; import android.content.res.Configuration; import android.database.Cursor; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Color; import android.graphics.PorterDuff; import android.graphics.PorterDuffColorFilter; import android.graphics.drawable.Drawable; import android.media.MediaScannerConnection; import android.net.Uri; import android.os.AsyncTask; import android.os.Build; import android.os.Bundle; import android.os.Handler; import android.provider.MediaStore; import android.support.annotation.NonNull; import android.support.design.widget.AppBarLayout; import android.support.design.widget.BottomNavigationView; import android.support.design.widget.FloatingActionButton; import android.support.design.widget.Snackbar; import android.support.v4.app.ActivityOptionsCompat; import android.support.v4.content.ContextCompat; import android.support.v4.view.GravityCompat; import android.support.v4.view.MenuItemCompat; import android.support.v4.widget.DrawerLayout; import android.support.v4.widget.SwipeRefreshLayout; import android.support.v7.app.ActionBarDrawerToggle; import android.support.v7.app.AlertDialog; import android.support.v7.widget.CardView; import android.support.v7.widget.DefaultItemAnimator; import android.support.v7.widget.GridLayoutManager; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.support.v7.widget.SearchView; import android.support.v7.widget.SwitchCompat; import android.support.v7.widget.Toolbar; import android.text.Editable; import android.text.Html; import android.text.TextUtils; import android.text.TextWatcher; import android.util.Log; import android.view.Menu; import android.view.MenuItem; import android.view.MotionEvent; import android.view.ScaleGestureDetector; import android.view.View; import android.view.ViewAnimationUtils; import android.view.WindowManager; import android.view.inputmethod.InputMethodManager; import android.webkit.MimeTypeMap; import android.widget.CompoundButton; import android.widget.EditText; import android.widget.FrameLayout; import android.widget.ImageView; import android.widget.RadioButton; import android.widget.RadioGroup; import android.widget.ScrollView; import android.widget.SeekBar; import android.widget.Spinner; import android.widget.TextView; import com.mikepenz.google_material_typeface_library.GoogleMaterial; import com.mikepenz.iconics.view.IconicsImageView; import org.fossasia.phimpme.R; import org.fossasia.phimpme.base.SharedMediaActivity; import org.fossasia.phimpme.data.local.FavouriteImagesModel; import org.fossasia.phimpme.data.local.ImageDescModel; import org.fossasia.phimpme.data.local.UploadHistoryRealmModel; import org.fossasia.phimpme.gallery.SelectAlbumBottomSheet; import org.fossasia.phimpme.gallery.adapters.AlbumsAdapter; import org.fossasia.phimpme.gallery.adapters.MediaAdapter; import org.fossasia.phimpme.gallery.data.Album; import org.fossasia.phimpme.gallery.data.CustomAlbumsHelper; import org.fossasia.phimpme.gallery.data.HandlingAlbums; import org.fossasia.phimpme.gallery.data.Media; import org.fossasia.phimpme.gallery.data.base.MediaComparators; import 
org.fossasia.phimpme.gallery.data.base.SortingMode; import org.fossasia.phimpme.gallery.data.base.SortingOrder; import org.fossasia.phimpme.gallery.data.providers.MediaStoreProvider; import org.fossasia.phimpme.gallery.data.providers.StorageProvider; import org.fossasia.phimpme.gallery.util.Affix; import org.fossasia.phimpme.gallery.util.AlertDialogsHelper; import org.fossasia.phimpme.gallery.util.ContentHelper; import org.fossasia.phimpme.gallery.util.Measure; import org.fossasia.phimpme.gallery.util.PreferenceUtil; import org.fossasia.phimpme.gallery.util.SecurityHelper; import org.fossasia.phimpme.gallery.util.StringUtils; import org.fossasia.phimpme.gallery.views.GridSpacingItemDecoration; import org.fossasia.phimpme.uploadhistory.UploadHistory; import org.fossasia.phimpme.utilities.ActivitySwitchHelper; import org.fossasia.phimpme.utilities.Constants; import org.fossasia.phimpme.utilities.NotificationHandler; import org.fossasia.phimpme.utilities.SnackBarHandler; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.Locale; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import butterknife.BindView; import butterknife.ButterKnife; import io.realm.Realm; import io.realm.RealmQuery; import io.realm.RealmResults; import static org.fossasia.phimpme.gallery.data.base.SortingMode.DATE; import static org.fossasia.phimpme.gallery.data.base.SortingMode.NAME; import static org.fossasia.phimpme.gallery.data.base.SortingMode.NUMERIC; import static org.fossasia.phimpme.gallery.data.base.SortingMode.SIZE; import static org.fossasia.phimpme.gallery.util.ThemeHelper.LIGHT_THEME; public class LFMainActivity extends SharedMediaActivity { private static String TAG = "AlbumsAct"; private int REQUEST_CODE_SD_CARD_PERMISSIONS = 42; private static final int BUFFER = 80000; private boolean about = false, settings = false, uploadHistory = false, favourites = false; private CustomAlbumsHelper customAlbumsHelper = CustomAlbumsHelper.getInstance(LFMainActivity.this); private PreferenceUtil SP; private SecurityHelper securityObj; private AlbumsAdapter albumsAdapter; private GridSpacingItemDecoration rvAlbumsDecoration; private MediaAdapter mediaAdapter; private GridSpacingItemDecoration rvMediaDecoration; private SelectAlbumBottomSheet bottomSheetDialogFragment; private boolean hidden = false, pickMode = false, editMode = false, albumsMode = true, firstLaunch = true, localFolder = true, hidenav = false; //to handle pinch gesture private ScaleGestureDetector mScaleGestureDetector; //To handle all photos/Album conditions public boolean all_photos = false; private boolean checkForReveal = true; final String REVIEW_ACTION = "com.android.camera.action.REVIEW"; public static ArrayList<Media> listAll; public int size; public int pos; ArrayList<String> path; private ArrayList<Media> media; private ArrayList<Media> selectedMedias = new ArrayList<>(); public boolean visible; private ArrayList<Album> albList; //To handle favourite collection private Realm realm; private ArrayList<Media> favouriteslist; public boolean fav_photos = false; private IconicsImageView favicon; // To handle back pressed boolean doubleBackToExitPressedOnce = false; private boolean fromOnClick = false; // Binding various views with 
Butterknife @BindView(R.id.toolbar) protected Toolbar toolbar; @BindView(R.id.grid_albums) protected RecyclerView rvAlbums; @BindView(R.id.grid_photos) protected RecyclerView rvMedia; @BindView(R.id.swipeRefreshLayout) protected SwipeRefreshLayout swipeRefreshLayout; @BindView(R.id.drawer_layout) protected DrawerLayout mDrawerLayout; @BindView(R.id.fab_scroll_up) protected FloatingActionButton fabScrollUp; @BindView(R.id.Drawer_Setting_Item) protected TextView drawerSettingText; @BindView(R.id.Drawer_About_Item) protected TextView drawerAboutText; @BindView(R.id.Drawer_share_Item) protected TextView drawerShareText; @BindView(R.id.Drawer_rate_Item) protected TextView drawerRateText; @BindView(R.id.Drawer_Upload_Item) protected TextView drawerUploadText; @BindView(R.id.Drawer_Setting_Icon) protected IconicsImageView drawerSettingIcon; @BindView(R.id.Drawer_About_Icon) protected IconicsImageView drawerAboutIcon; @BindView(R.id.Drawer_share_Icon) protected IconicsImageView drawerShareIcon; @BindView(R.id.Drawer_rate_Icon) protected IconicsImageView drawerRateIcon; @BindView(R.id.Drawer_Upload_Icon) protected IconicsImageView drawerUploadIcon; @BindView(R.id.drawer_scrollbar) protected ScrollView scrollView; @BindView(R.id.appbar_toolbar) protected View toolbari; @BindView(R.id.nothing_to_show) protected TextView nothingToShow; @BindView(R.id.Drawer_Default_Icon) protected IconicsImageView defaultIcon; @BindView(R.id.Drawer_hidden_Icon) protected IconicsImageView hiddenIcon; @BindView(R.id.Drawer_Default_Item) protected TextView defaultText; @BindView(R.id.Drawer_hidden_Item) protected TextView hiddenText; @BindView(R.id.star_image_view) protected ImageView starImageView; /* editMode- When true, user can select items by clicking on them one by one */ /** * Handles long clicks on photos. * If first long click on photo (editMode = false), go into selection mode and set editMode = true. * If not first long click, means that already in selection mode- s0 select all photos upto chosen one. */ private View.OnLongClickListener photosOnLongClickListener = new View.OnLongClickListener() { @Override public boolean onLongClick(View v) { if (checkForReveal) { enterReveal(); checkForReveal = false; } Media m = (Media) v.findViewById(R.id.photo_path).getTag(); //If first long press, turn on selection mode hideNavigationBar(); hidenav = true; if (!all_photos && !fav_photos) { appBarOverlay(); if (!editMode) { mediaAdapter.notifyItemChanged(getAlbum().toggleSelectPhoto(m)); editMode = true; } else getAlbum().selectAllPhotosUpTo(getAlbum().getIndex(m), mediaAdapter); invalidateOptionsMenu(); } else if (all_photos && !fav_photos) { if (!editMode) { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); editMode = true; } } else if (fav_photos && !all_photos) { if (!editMode) { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); editMode = true; } } else selectAllPhotosUpTo(getImagePosition(m.getPath()), mediaAdapter); return true; } }; /** * Helper method for making reveal animation for toolbar when any item is selected by long click. 
*/ private void enterReveal() { // get the center for the clipping circle int cx = toolbari.getMeasuredWidth() / 2; int cy = toolbari.getMeasuredHeight() / 2; // get the final radius for the clipping circle int finalRadius = Math.max(toolbari.getWidth(), toolbari.getHeight()) / 2; // create the animator for this view Animator anim = ViewAnimationUtils.createCircularReveal(toolbari, cx, cy, 5, finalRadius); anim.start(); } /** * Helper method for making reveal animation for toolbar when back is presses in edit mode. */ private void exitReveal() { // get the center for the clipping circle int cx = toolbari.getMeasuredWidth() / 2; int cy = toolbari.getMeasuredHeight() / 2; // get the final radius for the clipping circle int finalRadius = 0; // create the animator for this view Animator anim = ViewAnimationUtils.createCircularReveal(toolbari, cx, cy, cx, finalRadius); anim.start(); } private int toggleSelectPhoto(Media m) { if (m != null) { m.setSelected(!m.isSelected()); if (m.isSelected()) selectedMedias.add(m); else selectedMedias.remove(m); } if (selectedMedias.size() == 0) { getNavigationBar(); editMode = false; toolbar.setTitle(getString(R.string.all)); } else { if (!fav_photos) { toolbar.setTitle(selectedMedias.size() + "/" + size); } else if (fav_photos) { toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size()); } } invalidateOptionsMenu(); return getImagePosition(m.getPath()); } public void clearSelectedPhotos() { for (Media m : selectedMedias) m.setSelected(false); if (selectedMedias != null) selectedMedias.clear(); if (localFolder) toolbar.setTitle(getString(R.string.local_folder)); else toolbar.setTitle(getString(R.string.hidden_folder)); } public void selectAllPhotos() { if (all_photos && !fav_photos) { for (Media m : listAll) { m.setSelected(true); selectedMedias.add(m); } toolbar.setTitle(selectedMedias.size() + "/" + size); } else if (!all_photos && fav_photos) { for (Media m : favouriteslist) { m.setSelected(true); if (m.isSelected()) selectedMedias.add(m); } toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size()); } } public void selectAllPhotosUpTo(int targetIndex, MediaAdapter adapter) { int indexRightBeforeOrAfter = -1; int indexNow; for (Media sm : selectedMedias) { indexNow = getImagePosition(sm.getPath()); if (indexRightBeforeOrAfter == -1) indexRightBeforeOrAfter = indexNow; if (indexNow > targetIndex) break; indexRightBeforeOrAfter = indexNow; } if (indexRightBeforeOrAfter != -1) { for (int index = Math.min(targetIndex, indexRightBeforeOrAfter); index <= Math.max(targetIndex, indexRightBeforeOrAfter); index++) { if (listAll.get(index) != null && !listAll.get(index).isSelected()) { listAll.get(index).setSelected(true); selectedMedias.add(listAll.get(index)); adapter.notifyItemChanged(index); } } } toolbar.setTitle(selectedMedias.size() + "/" + size); } public void populateAlbum() { albList = new ArrayList<>(); for (Album album : getAlbums().dispAlbums) { albList.add(album); } } /** * Handles short clicks on photos. * If in selection mode (editMode = true) , select the photo if it is unselected and unselect it if it's selected. * This mechanism makes it possible to select photos one by one by short-clicking on them. 
* If not in selection mode (editMode = false) , get current photo from album and open it in singleActivity */ private View.OnClickListener photosOnClickListener = new View.OnClickListener() { @Override public void onClick(View v) { Media m = (Media) v.findViewById(R.id.photo_path).getTag(); if (all_photos) { pos = getImagePosition(m.getPath()); } if (fav_photos) { pos = getImagePosition(m.getPath()); } if (!all_photos && !fav_photos) { if (!pickMode) { //if in selection mode, toggle the selected/unselect state of photo if (editMode) { appBarOverlay(); mediaAdapter.notifyItemChanged(getAlbum().toggleSelectPhoto(m)); if (getAlbum().selectedMedias.size() == 0) getNavigationBar(); invalidateOptionsMenu(); } else { v.setTransitionName(getString(R.string.transition_photo)); getAlbum().setCurrentPhotoIndex(m); Intent intent = new Intent(LFMainActivity.this, SingleMediaActivity.class); intent.putExtra("path", Uri.fromFile(new File(m.getPath())).toString()); ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setAction(SingleMediaActivity.ACTION_OPEN_ALBUM); startActivity(intent, options.toBundle()); } } else { setResult(RESULT_OK, new Intent().setData(m.getUri())); finish(); } } else if (all_photos && !fav_photos) { if (!editMode) { Intent intent = new Intent(REVIEW_ACTION, Uri.fromFile(new File(m.getPath()))); intent.putExtra(getString(R.string.all_photo_mode), true); intent.putExtra(getString(R.string.position), pos); intent.putExtra(getString(R.string.allMediaSize), size); v.setTransitionName(getString(R.string.transition_photo)); ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setClass(getApplicationContext(), SingleMediaActivity.class); startActivity(intent, options.toBundle()); } else { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); } } else if (!all_photos && fav_photos) { if (!editMode) { Intent intent = new Intent(REVIEW_ACTION, Uri.fromFile(new File(m.getPath()))); intent.putExtra("fav_photos", true); intent.putExtra(getString(R.string.position), pos); intent.putParcelableArrayListExtra("favouriteslist", favouriteslist); intent.putExtra(getString(R.string.allMediaSize), favouriteslist.size()); v.setTransitionName(getString(R.string.transition_photo)); ActivityOptionsCompat options = ActivityOptionsCompat. 
makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setClass(getApplicationContext(), SingleMediaActivity.class); startActivity(intent, options.toBundle()); } else { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); } } } }; private View.OnLongClickListener albumOnLongCLickListener = new View.OnLongClickListener() { @Override public boolean onLongClick(View v) { final Album album = (Album) v.findViewById(R.id.album_name).getTag(); if(securityObj.isActiveSecurity() && securityObj.isPasswordOnfolder()) { final boolean passco[] = {false}; if (check(album.getPath())) { AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog on wrong password } }); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor( new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE) .setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); if (checkForReveal) { enterReveal(); checkForReveal = false; } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } // if password is incorrect, notify user of incorrect password else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler .showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { if (checkForReveal) { enterReveal(); checkForReveal = false; } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } } else { if (checkForReveal) { enterReveal(); checkForReveal = false; } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } return true; 
} }; private boolean check(String path) { boolean dr = false; for (String s : securityObj.getSecuredfolders()) { if (s.equals(path)) { dr = true; break; } } return dr; } private View.OnClickListener albumOnClickListener = new View.OnClickListener() { @Override public void onClick(View v) { fromOnClick = true; final Album album = (Album) v.findViewById(R.id.album_name).getTag(); //int index = Integer.parseInt(v.findViewById(R.id.album_name).getTag().toString()); if (editMode) { albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); invalidateOptionsMenu(); } else if(securityObj.isActiveSecurity() && securityObj.isPasswordOnfolder()){ final boolean[] passco = {false}; if (check(album.getPath())) { AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog on wrong password } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor( new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE) .setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } // if password is incorrect, notify user of incorrect password else { passco[0] =true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler .showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } } else { getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } } }; /** * Method for clearing the scroll flags. */ private void appBarOverlay() { AppBarLayout.LayoutParams params = (AppBarLayout.LayoutParams) toolbar.getLayoutParams(); params.setScrollFlags(AppBarLayout.LayoutParams.SCROLL_FLAG_EXIT_UNTIL_COLLAPSED); // clear all scroll flags } /** * Method for adding the scroll flags. 
*/ private void clearOverlay() { AppBarLayout.LayoutParams params = (AppBarLayout.LayoutParams) toolbar.getLayoutParams(); params.setScrollFlags(AppBarLayout.LayoutParams.SCROLL_FLAG_SCROLL | AppBarLayout.LayoutParams.SCROLL_FLAG_ENTER_ALWAYS); } public int getImagePosition(String path) { int pos = 0; if (all_photos) { for (int i = 0; i < listAll.size(); i++) { if (listAll.get(i).getPath().equals(path)) { pos = i; break; } } } else if (fav_photos) { Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings .getSortingOrder())); for (int i = 0; i < favouriteslist.size(); i++) { if (favouriteslist.get(i).getPath().equals(path)) { pos = i; break; } } } return pos; } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); Log.e("TAG", "lfmain"); ButterKnife.bind(this); BottomNavigationView navigationView = (BottomNavigationView) findViewById(R.id.bottombar); favicon = (IconicsImageView) findViewById(R.id.Drawer_favourite_Icon); this.overridePendingTransition(R.anim.left_to_right, R.anim.right_to_left); SP = PreferenceUtil.getInstance(getApplicationContext()); albumsMode = true; editMode = false; securityObj = new SecurityHelper(LFMainActivity.this); if (getIntent().getExtras() != null) pickMode = getIntent().getExtras().getBoolean(SplashScreen.PICK_MODE); SP.putBoolean(getString(R.string.preference_use_alternative_provider), false); initUI(); new initAllPhotos().execute(); new SortModeSet().execute(DATE); displayData(getIntent().getExtras()); checkNothing(); populateAlbum(); navigationView.setOnNavigationItemSelectedListener(new BottomNavigationView.OnNavigationItemSelectedListener() { @Override public boolean onNavigationItemSelected(@NonNull MenuItem item) { int itemID = item.getItemId(); if (itemID == R.id.navigation_home) { if (!localFolder) { hidden = false; localFolder = true; findViewById(R.id.ll_drawer_hidden).setBackgroundColor(Color.TRANSPARENT); findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); tint(); } displayAlbums(); return true; } return LFMainActivity.super.onNavigationItemSelected(item); } }); } @Override public void onResume() { super.onResume(); ActivitySwitchHelper.setContext(this); securityObj.updateSecuritySetting(); setupUI(); if (all_photos && !fav_photos) { new PrepareAllPhotos().execute(); } if (!all_photos && fav_photos) { new FavouritePhotos().execute(); } if (!all_photos && !fav_photos) { if (SP.getBoolean("auto_update_media", false)) { if (albumsMode) { if (!firstLaunch) new PrepareAlbumTask().execute(); } else new PreparePhotosTask().execute(); } else { albumsAdapter.notifyDataSetChanged(); mediaAdapter.notifyDataSetChanged(); } } invalidateOptionsMenu(); firstLaunch = false; } private void displayCurrentAlbumMedia(boolean reload) { toolbar.setTitle(getAlbum().getName()); toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back)); mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED); mediaAdapter.swapDataSet(getAlbum().getMedia()); if (reload) new PreparePhotosTask().execute(); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { displayAlbums(); } }); albumsMode = editMode = false; invalidateOptionsMenu(); } private void displayAllMedia(boolean reload) { clearSelectedPhotos(); toolbar.setTitle(getString(R.string.all_media)); toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back)); 
mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED); mediaAdapter.swapDataSet(listAll); if (reload) new PrepareAllPhotos().execute(); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { displayAlbums(); } }); albumsMode = editMode = false; invalidateOptionsMenu(); } private void getfavouriteslist() { favouriteslist = new ArrayList<Media>(); realm = Realm.getDefaultInstance(); RealmQuery<FavouriteImagesModel> favouriteImagesModelRealmQuery = realm.where(FavouriteImagesModel.class); int count = Integer.parseInt(String.valueOf(favouriteImagesModelRealmQuery.count())); for (int i = 0; i < count; i++) { final String path = favouriteImagesModelRealmQuery.findAll().get(i).getPath(); if (new File(favouriteImagesModelRealmQuery.findAll().get(i).getPath()).exists()) { favouriteslist.add(new Media(new File(favouriteImagesModelRealmQuery.findAll().get(i).getPath()))); } else { realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { RealmResults<FavouriteImagesModel> result = realm.where(FavouriteImagesModel.class).equalTo ("path", path).findAll(); result.deleteAllFromRealm(); } }); } } } private void displayfavourites() { toolbar.setTitle(getResources().getString(R.string.favourite_title)); getfavouriteslist(); toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back)); mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED); fav_photos = true; mediaAdapter.swapDataSet(favouriteslist); if (fav_photos) { new FavouritePhotos().execute(); } toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { displayAlbums(); } }); albumsMode = editMode = all_photos = false; invalidateOptionsMenu(); } private void displayAlbums() { all_photos = false; fav_photos = false; displayAlbums(true); } private void displayAlbums(boolean reload) { if (localFolder) { toolbar.setTitle(getString(R.string.local_folder)); } else { toolbar.setTitle(getString(R.string.hidden_folder)); } toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_menu)); mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_UNLOCKED); albumsAdapter.swapDataSet(getAlbums().dispAlbums); if (reload) new PrepareAlbumTask().execute(); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { mDrawerLayout.openDrawer(GravityCompat.START); } }); albumsMode = true; editMode = false; invalidateOptionsMenu(); mediaAdapter.swapDataSet(new ArrayList<Media>()); rvMedia.scrollToPosition(0); } @Override public void onConfigurationChanged(Configuration newConfig) { super.onConfigurationChanged(newConfig); updateColumnsRvs(); } private boolean displayData(Bundle data) { if (data != null) { switch (data.getInt(SplashScreen.CONTENT)) { case SplashScreen.ALBUMS_PREFETCHED: displayAlbums(false); // we pass the albumMode here . If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(true); return true; case SplashScreen.ALBUMS_BACKUP: displayAlbums(true); // we pass the albumMode here . If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(true); return true; case SplashScreen.PHOTOS_PREFETCHED: //TODO ask password if hidden new Thread(new Runnable() { @Override public void run() { getAlbums().loadAlbums(getApplicationContext(), getAlbum().isHidden()); } }).start(); displayCurrentAlbumMedia(false); // we pass the albumMode here . 
If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(false); return true; } } displayAlbums(true); return false; } private class initAllPhotos extends AsyncTask<Void, Void, Void> { @Override protected Void doInBackground(Void... arg0) { listAll = StorageProvider.getAllShownImages(LFMainActivity.this); size = listAll.size(); media = listAll; Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); return null; } } private void initUI() { clearOverlay(); setSupportActionBar(toolbar); rvAlbums.setHasFixedSize(true); rvAlbums.setItemAnimator(new DefaultItemAnimator()); rvMedia.setHasFixedSize(true); rvMedia.setItemAnimator(new DefaultItemAnimator()); albumsAdapter = new AlbumsAdapter(getAlbums().dispAlbums, LFMainActivity.this); albumsAdapter.setOnClickListener(albumOnClickListener); albumsAdapter.setOnLongClickListener(albumOnLongCLickListener); rvAlbums.setAdapter(albumsAdapter); //set scale gesture detector for resizing the gridItem mScaleGestureDetector = new ScaleGestureDetector(this, new ScaleGestureDetector.SimpleOnScaleGestureListener() { @Override public boolean onScale(ScaleGestureDetector detector) { if (detector.getCurrentSpan() > 200 && detector.getTimeDelta() > 200) { int spanCount = columnsCount(); //zooming out if ((detector.getCurrentSpan() - detector.getPreviousSpan() < -300) && spanCount < 6) { SP.putInt("n_columns_folders", spanCount + 1); updateColumnsRvAlbums(); } //zooming in else if ((detector.getCurrentSpan() - detector.getPreviousSpan() > 300) && spanCount > 1) { SP.putInt("n_columns_folders", spanCount - 1); updateColumnsRvAlbums(); } } return false; } }); //set touch listener on recycler view rvAlbums.setOnTouchListener(new View.OnTouchListener() { @Override public boolean onTouch(View v, MotionEvent event) { mScaleGestureDetector.onTouchEvent(event); return false; } }); mediaAdapter = new MediaAdapter(getAlbum().getMedia(), LFMainActivity.this); mediaAdapter.setOnClickListener(photosOnClickListener); mediaAdapter.setOnLongClickListener(photosOnLongClickListener); rvMedia.setAdapter(mediaAdapter); int spanCount = columnsCount(); rvAlbumsDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvAlbums.addItemDecoration(rvAlbumsDecoration); rvAlbums.setLayoutManager(new GridLayoutManager(this, spanCount)); spanCount = mediaCount(); rvMediaDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvMedia.setLayoutManager(new GridLayoutManager(getApplicationContext(), spanCount)); rvMedia.addItemDecoration(rvMediaDecoration); /**** SWIPE TO REFRESH ****/ swipeRefreshLayout.setColorSchemeColors(getAccentColor()); swipeRefreshLayout.setProgressBackgroundColorSchemeColor(getBackgroundColor()); swipeRefreshLayout.setOnRefreshListener(new SwipeRefreshLayout.OnRefreshListener() { @Override public void onRefresh() { getNavigationBar(); if (albumsMode) { getAlbums().clearSelectedAlbums(); new PrepareAlbumTask().execute(); } else { if (!all_photos && !fav_photos) { getAlbum().clearSelectedPhotos(); new PreparePhotosTask().execute(); } else { if (all_photos && !fav_photos) { new PrepareAllPhotos().execute(); } else if (!all_photos && fav_photos) { new FavouritePhotos().execute(); } } } } }); /**** DRAWER ****/ mDrawerLayout.addDrawerListener(new ActionBarDrawerToggle(this, mDrawerLayout, toolbar, R.string.drawer_open, R.string.drawer_close) { public void 
onDrawerClosed(View view) { //Put your code here // materialMenu.animateIconState(MaterialMenuDrawable.IconState.BURGER); Intent intent = null; if (settings) { intent = new Intent(LFMainActivity.this, SettingsActivity.class); startActivity(intent); settings = false; } else if (about) { intent = new Intent(LFMainActivity.this, AboutActivity.class); startActivity(intent); about = false; } else if (uploadHistory) { intent = new Intent(LFMainActivity.this, UploadHistory.class); startActivity(intent); uploadHistory = false; } else if (favourites) { displayfavourites(); favourites = false; } } public void onDrawerOpened(View drawerView) { //Put your code here //materialMenu.animateIconState(MaterialMenuDrawable.IconState.ARROW); } }); /** * Floating Action Button to Scroll Up */ setUpFab(); setRecentApp(getString(R.string.app_name)); setupUI(); if (pickMode) { hideNavigationBar(); swipeRefreshLayout.setPadding(0, 0, 0, 0); } } /** * Method to set scroll listeners for recycler view */ private void setUpFab() { fabScrollUp.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { rvMedia.smoothScrollToPosition(0); fabScrollUp.hide(); } }); fabScrollUp.hide(); rvMedia.addOnScrollListener(new RecyclerView.OnScrollListener() { @Override public void onScrolled(RecyclerView recyclerView, int dx, int dy) { LinearLayoutManager linearLayoutManager = (LinearLayoutManager) recyclerView.getLayoutManager(); if (linearLayoutManager.findFirstVisibleItemPosition() > 30 && !fabScrollUp.isShown()) fabScrollUp.show(); else if (linearLayoutManager.findFirstVisibleItemPosition() < 30 && fabScrollUp.isShown()) fabScrollUp.hide(); fabScrollUp.setAlpha(0.7f); } }); } public int columnsCount() { return getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT ? SP.getInt("n_columns_folders", 2) : SP.getInt("n_columns_folders_landscape", 3); } public int mediaCount() { return getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT ? SP.getInt("n_columns_media", 3) : SP.getInt("n_columns_media_landscape", 4); } private void updateColumnsRvs() { updateColumnsRvAlbums(); updateColumnsRvMedia(); } private void updateColumnsRvAlbums() { int spanCount = columnsCount(); if (spanCount != ((GridLayoutManager) rvAlbums.getLayoutManager()).getSpanCount()) { rvAlbums.removeItemDecoration(rvAlbumsDecoration); rvAlbumsDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvAlbums.addItemDecoration(rvAlbumsDecoration); rvAlbums.setLayoutManager(new GridLayoutManager(this, spanCount)); } } private void updateColumnsRvMedia() { int spanCount = mediaCount(); if (spanCount != ((GridLayoutManager) rvMedia.getLayoutManager()).getSpanCount()) { ((GridLayoutManager) rvMedia.getLayoutManager()).getSpanCount(); rvMedia.removeItemDecoration(rvMediaDecoration); rvMediaDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvMedia.setLayoutManager(new GridLayoutManager(getApplicationContext(), spanCount)); rvMedia.addItemDecoration(rvMediaDecoration); } } //region TESTING @TargetApi(Build.VERSION_CODES.LOLLIPOP) @Override public final void onActivityResult(final int requestCode, final int resultCode, final Intent resultData) { if (resultCode == RESULT_OK) { if (requestCode == REQUEST_CODE_SD_CARD_PERMISSIONS) { Uri treeUri = resultData.getData(); // Persist URI in shared preference so that you can use it later. 
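// Also take a persistable write permission on the tree URI so SD-card access keeps working after the app restarts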
ContentHelper.saveSdCardInfo(getApplicationContext(), treeUri); getContentResolver().takePersistableUriPermission(treeUri, Intent.FLAG_GRANT_WRITE_URI_PERMISSION); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.got_permission_wr_sdcard), 0); } } } //endregion private void requestSdCardPermissions() { final AlertDialog.Builder dialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialogsHelper.getTextDialog(LFMainActivity.this, dialogBuilder, R.string.sd_card_write_permission_title, R.string.sd_card_permissions_message, null); dialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialogInterface, int i) { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) startActivityForResult(new Intent(Intent.ACTION_OPEN_DOCUMENT_TREE), REQUEST_CODE_SD_CARD_PERMISSIONS); } }); AlertDialog alertDialog = dialogBuilder.create(); alertDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialog); } //region UI/GRAPHIC private void setupUI() { updateColumnsRvs(); //TODO: MUST BE FIXED toolbar.setPopupTheme(getPopupToolbarStyle()); toolbar.setBackgroundColor(getPrimaryColor()); if (localFolder) { toolbar.setTitle(getString(R.string.local_folder)); } else { toolbar.setTitle(getString(R.string.hidden_folder)); } /**** SWIPE TO REFRESH ****/ swipeRefreshLayout.setColorSchemeColors(getAccentColor()); swipeRefreshLayout.setProgressBackgroundColorSchemeColor(getBackgroundColor()); setStatusBarColor(); setNavBarColor(); setDrawerTheme(); rvAlbums.setBackgroundColor(getBackgroundColor()); rvMedia.setBackgroundColor(getBackgroundColor()); mediaAdapter.updatePlaceholder(getApplicationContext()); albumsAdapter.updateTheme(); /**** DRAWER ****/ setScrollViewColor(scrollView); /**** recyclers drawable *****/ Drawable drawableScrollBar = ContextCompat.getDrawable(getApplicationContext(), R.drawable.ic_scrollbar); drawableScrollBar.setColorFilter(new PorterDuffColorFilter(getPrimaryColor(), PorterDuff.Mode.SRC_ATOP)); /**** FAB ****/ fabScrollUp.setBackgroundTintList(ColorStateList.valueOf(getAccentColor())); fabScrollUp.setAlpha(0.7f); } private void setDrawerTheme() { findViewById(R.id.Drawer_Header).setBackgroundColor(getPrimaryColor()); findViewById(R.id.Drawer_Body).setBackgroundColor(getDrawerBackground()); findViewById(R.id.drawer_scrollbar).setBackgroundColor(getDrawerBackground()); findViewById(R.id.Drawer_Body_Divider).setBackgroundColor(getIconColor()); /** TEXT VIEWS **/ int color = getTextColor(); defaultText.setTextColor(color); drawerSettingText.setTextColor(color); drawerAboutText.setTextColor(color); hiddenText.setTextColor(color); drawerShareText.setTextColor(color); drawerRateText.setTextColor(color); drawerUploadText.setTextColor(color); ((TextView) findViewById(R.id.Drawer_Default_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_Setting_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_About_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_hidden_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_share_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_rate_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_Upload_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_favourite_Item)).setTextColor(color); /** ICONS **/ color = 
getIconColor(); defaultIcon.setColor(color); drawerSettingIcon.setColor(color); drawerAboutIcon.setColor(color); hiddenIcon.setColor(color); drawerShareIcon.setColor(color); drawerRateIcon.setColor(color); drawerUploadIcon.setColor(color); favicon.setColor(color); // Default setting if (localFolder) findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); else findViewById(R.id.ll_drawer_hidden).setBackgroundColor(getHighlightedItemColor()); tint(); findViewById(R.id.ll_drawer_Setting).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { settings = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_About).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { about = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_favourites).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { favourites = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_uploadhistory).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { uploadHistory = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_Default).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { localFolder = true; findViewById(R.id.ll_drawer_hidden).setBackgroundColor(Color.TRANSPARENT); findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); tint(); toolbar.setTitle(getString(R.string.local_folder)); hidden = false; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask().execute(); } }); findViewById(R.id.ll_drawer_hidden).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { localFolder = false; findViewById(R.id.ll_drawer_Default).setBackgroundColor(Color.TRANSPARENT); findViewById(R.id.ll_drawer_hidden).setBackgroundColor(getHighlightedItemColor()); tint(); toolbar.setTitle(getString(R.string.hidden_folder)); if (securityObj.isActiveSecurity() && securityObj.isPasswordOnHidden()) { final boolean[] passco = {false}; AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, 
getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View .OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { hidden = true; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask().execute(); passwordDialog.dismiss(); } else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), 0); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { hidden = true; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask().execute(); } } }); findViewById(R.id.ll_share_phimpme).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { onInviteClicked(); mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_rate_phimpme).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { final String appPackageName = getPackageName(); try { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse("market://details?id=" + appPackageName))); } catch (android.content.ActivityNotFoundException anfe) { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse("https://play.google.com/store/apps/details?id=" + appPackageName))); } mDrawerLayout.closeDrawer(GravityCompat.START); } }); } private void onInviteClicked() { Intent sendIntent = new Intent(); sendIntent.setAction(Intent.ACTION_SEND); sendIntent.putExtra(Intent.EXTRA_TEXT, getString(R.string.install_phimpme) + "\n " + getString(R.string.invitation_deep_link)); sendIntent.setType("text/plain"); startActivity(sendIntent); } //endregion private void updateSelectedStuff() { if (albumsMode) { if (getAlbums().getSelectedCount() == 0) { clearOverlay(); checkForReveal = true; swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } if (editMode) toolbar.setTitle(getAlbums().getSelectedCount() + "/" + getAlbums().dispAlbums.size()); else { if (hidden) toolbar.setTitle(getString(R.string.hidden_folder)); else toolbar.setTitle(getString(R.string.local_folder)); toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_menu)); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { mDrawerLayout.openDrawer(GravityCompat.START); } }); } } else { if (!all_photos) { if (getAlbum().getSelectedCount() == 0) { clearOverlay(); checkForReveal = true; swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } } else { if (selectedMedias.size() == 0) { clearOverlay(); swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } } if (editMode) { if (!all_photos && !fav_photos) toolbar.setTitle(getAlbum().getSelectedCount() + "/" + getAlbum().getMedia().size()); else if (!fav_photos && all_photos) { toolbar.setTitle(selectedMedias.size() + "/" + size); } else if (fav_photos && !all_photos) { toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size()); } } else { if (!all_photos && !fav_photos) toolbar.setTitle(getAlbum().getName()); else if (all_photos && !fav_photos) { toolbar.setTitle(getString(R.string.all_media)); } else if (fav_photos && !all_photos) { toolbar.setTitle(getResources().getString(R.string.favourite_title)); } toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back)); 
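// Not in selection mode: the toolbar back arrow returns to the albums overview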
toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { displayAlbums(); } }); } } if (editMode) { toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_clear)); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { getNavigationBar(); finishEditMode(); clearSelectedPhotos(); } }); } } //called from onBackPressed() private void finishEditMode() { if (editMode) exitReveal(); editMode = false; if (albumsMode) { getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos) { getAlbum().clearSelectedPhotos(); mediaAdapter.notifyDataSetChanged(); } else { clearSelectedPhotos(); mediaAdapter.notifyDataSetChanged(); } } invalidateOptionsMenu(); } private void checkNothing() { nothingToShow.setTextColor(getTextColor()); nothingToShow.setText(getString(R.string.there_is_nothing_to_show)); nothingToShow.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0) || (!albumsMode && getAlbum().getMedia().size() == 0) ? View.VISIBLE : View.GONE); TextView a = (TextView) findViewById(R.id.nothing_to_show); a.setTextColor(getTextColor()); a.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); starImageView.setVisibility(View.GONE); } private void checkNothingFavourites() { nothingToShow.setTextColor(getTextColor()); nothingToShow.setText(R.string.no_favourites_text); nothingToShow.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); starImageView.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); if (getBaseTheme() != LIGHT_THEME) starImageView.setColorFilter(ContextCompat.getColor(this, R.color.white), PorterDuff.Mode.SRC_ATOP); else starImageView.setColorFilter(ContextCompat.getColor(this, R.color.black), PorterDuff.Mode.SRC_ATOP); } //region MENU @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.menu_albums, menu); if (albumsMode) { MenuItem menuitem = menu.findItem(R.id.search_action); final SearchView searchView = (SearchView) MenuItemCompat.getActionView(menuitem); searchView.setOnQueryTextFocusChangeListener(new View.OnFocusChangeListener() { @Override public void onFocusChange(final View view, boolean b) { if (b) { view.postDelayed(new Runnable() { @Override public void run() { InputMethodManager imm = (InputMethodManager) getSystemService(Context .INPUT_METHOD_SERVICE); imm.showSoftInput(view.findFocus(), 0); } }, 200); } } }); searchView.setOnQueryTextListener(new SearchView.OnQueryTextListener() { @Override public boolean onQueryTextSubmit(String query) { return false; } @Override public boolean onQueryTextChange(String newText) { return searchTitle(newText); } }); menu.findItem(R.id.select_all).setTitle( getString(getAlbums().getSelectedCount() == albumsAdapter.getItemCount() ? 
R.string.clear_selected : R.string.select_all)); menu.findItem(R.id.ascending_sort_action).setChecked(getAlbums().getSortingOrder() == SortingOrder.ASCENDING); switch (getAlbums().getSortingMode()) { case NAME: menu.findItem(R.id.name_sort_action).setChecked(true); break; case SIZE: menu.findItem(R.id.size_sort_action).setChecked(true); break; case DATE: default: menu.findItem(R.id.date_taken_sort_action).setChecked(true); break; case NUMERIC: menu.findItem(R.id.numeric_sort_action).setChecked(true); break; } } else { getfavouriteslist(); menu.findItem(R.id.select_all).setTitle(getString(getAlbum().getSelectedCount() == mediaAdapter .getItemCount() || selectedMedias.size() == size || (selectedMedias.size() == favouriteslist.size () && fav_photos) ? R .string .clear_selected : R.string.select_all)); menu.findItem(R.id.ascending_sort_action).setChecked(getAlbum().settings.getSortingOrder() == SortingOrder.ASCENDING); switch (getAlbum().settings.getSortingMode()) { case NAME: menu.findItem(R.id.name_sort_action).setChecked(true); break; case SIZE: menu.findItem(R.id.size_sort_action).setChecked(true); break; case DATE: default: menu.findItem(R.id.date_taken_sort_action).setChecked(true); break; case NUMERIC: menu.findItem(R.id.numeric_sort_action).setChecked(true); break; } } menu.findItem(R.id.hideAlbumButton).setTitle(hidden ? getString(R.string.unhide) : getString(R.string.hide)); menu.findItem(R.id.delete_action).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_delete)); menu.findItem(R.id.sort_action).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_sort)); menu.findItem(R.id.sharePhotos).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_share)); return true; } public boolean searchTitle(String newText) { if (!fromOnClick) { String queryText = newText; queryText = queryText.toLowerCase(); final ArrayList<Album> newList = new ArrayList<>(); for (Album album : albList) { String name = album.getName().toLowerCase(); if (name.contains(queryText)) { newList.add(album); } } albumsAdapter.swapDataSet(newList); } else { fromOnClick = false; } return true; } @Override public boolean onPrepareOptionsMenu(final Menu menu) { if (albumsMode) { editMode = getAlbums().getSelectedCount() != 0; menu.setGroupVisible(R.id.album_options_menu, editMode); menu.setGroupVisible(R.id.photos_option_men, false); menu.findItem(R.id.all_photos).setVisible(!editMode && !hidden); menu.findItem(R.id.search_action).setVisible(!editMode); if (getAlbums().getSelectedCount() >= 1) { if (getAlbums().getSelectedCount() > 1) { menu.findItem(R.id.album_details).setVisible(false); } if (getAlbums().getSelectedCount() == 1) { menu.findItem(R.id.search_action).setVisible(false); } } } else { menu.findItem(R.id.search_action).setVisible(false); if (!all_photos && !fav_photos) { editMode = getAlbum().areMediaSelected(); menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); menu.findItem(R.id.all_photos).setVisible(false); menu.findItem(R.id.album_details).setVisible(false); } else if (all_photos && !fav_photos) { editMode = selectedMedias.size() != 0; menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); menu.findItem(R.id.all_photos).setVisible(false); menu.findItem(R.id.album_details).setVisible(false); } else if (!all_photos && fav_photos) { editMode = selectedMedias.size() != 0; menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); 
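// Favourites view: album-specific menu entries do not apply here, so hide them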
menu.findItem(R.id.album_details).setVisible(false); menu.findItem(R.id.all_photos).setVisible(false); } } togglePrimaryToolbarOptions(menu); updateSelectedStuff(); visible = getAlbum().getSelectedCount() > 0; menu.findItem(R.id.action_copy).setVisible(visible); menu.findItem(R.id.action_move).setVisible((visible || editMode) && !fav_photos); menu.findItem(R.id.action_add_favourites).setVisible((visible || editMode) && (!albumsMode && !fav_photos)); menu.findItem(R.id.excludeAlbumButton).setVisible(editMode && !all_photos && albumsMode && !fav_photos); menu.findItem(R.id.zipAlbumButton).setVisible(editMode && !all_photos && albumsMode && !fav_photos && !hidden && getAlbums().getSelectedCount() == 1); menu.findItem(R.id.select_all).setVisible(editMode); menu.findItem(R.id.delete_action).setVisible((!albumsMode || editMode) && (!all_photos || editMode) && (!fav_photos || editMode)); menu.findItem(R.id.hideAlbumButton).setVisible(!all_photos && !fav_photos && getAlbums().getSelectedCount() > 0); menu.findItem(R.id.clear_album_preview).setVisible(!albumsMode && getAlbum().hasCustomCover() && !fav_photos && !all_photos); menu.findItem(R.id.renameAlbum).setVisible(((albumsMode && getAlbums().getSelectedCount() == 1) || (!albumsMode && !editMode)) && (!all_photos && !fav_photos)); if (getAlbums().getSelectedCount() == 1) menu.findItem(R.id.set_pin_album).setTitle(getAlbums().getSelectedAlbum(0).isPinned() ? getString(R.string.un_pin) : getString(R.string.pin)); menu.findItem(R.id.set_pin_album).setVisible(albumsMode && getAlbums().getSelectedCount() == 1); menu.findItem(R.id.setAsAlbumPreview).setVisible(!albumsMode && !all_photos && getAlbum() .getSelectedCount() == 1); menu.findItem(R.id.affixPhoto).setVisible((!albumsMode && (getAlbum().getSelectedCount() > 1) || selectedMedias.size() > 1) && !fav_photos); if (albumsMode) menu.findItem(R.id.action_move).setVisible(getAlbums().getSelectedCount() == 1); return super.onPrepareOptionsMenu(menu); } private void togglePrimaryToolbarOptions(final Menu menu) { menu.setGroupVisible(R.id.general_action, !editMode); } //endregion @Override public boolean onOptionsItemSelected(MenuItem item) { getNavigationBar(); switch (item.getItemId()) { case R.id.all_photos: if (!all_photos) { all_photos = true; displayAllMedia(true); } else { displayAlbums(); } return true; case R.id.album_details: AlertDialog.Builder detailsDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialog detailsDialog; detailsDialog = AlertDialogsHelper.getAlbumDetailsDialog(this, detailsDialogBuilder, getAlbums().getSelectedAlbum(0)); detailsDialog.setButton(DialogInterface.BUTTON_POSITIVE, getString(R.string .ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { finishEditMode(); } }); detailsDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, getAccentColor(), detailsDialog); return true; case R.id.select_all: if (albumsMode) { //if all albums are already selected, unselect all of them if (getAlbums().getSelectedCount() == albumsAdapter.getItemCount()) { editMode = false; getAlbums().clearSelectedAlbums(); } // else, select all albums else getAlbums().selectAllAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos && !fav_photos) { //if all photos are already selected, unselect all of them if (getAlbum().getSelectedCount() == mediaAdapter.getItemCount()) { editMode = false; getAlbum().clearSelectedPhotos(); } // 
else, select all photos else getAlbum().selectAllPhotos(); mediaAdapter.notifyDataSetChanged(); } else if (all_photos && !fav_photos) { if (selectedMedias.size() == size) { editMode = false; clearSelectedPhotos(); } // else, select all photos else { clearSelectedPhotos(); selectAllPhotos(); } mediaAdapter.notifyDataSetChanged(); } else if (fav_photos && !all_photos) { if (selectedMedias.size() == favouriteslist.size()) { editMode = false; clearSelectedPhotos(); } // else, select all photos else { clearSelectedPhotos(); selectAllPhotos(); } Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); mediaAdapter.swapDataSet(favouriteslist); } } invalidateOptionsMenu(); return true; case R.id.set_pin_album: getAlbums().getSelectedAlbum(0).settings.togglePin(getApplicationContext()); getAlbums().sortAlbums(getApplicationContext()); getAlbums().clearSelectedAlbums(); invalidateOptionsMenu(); albumsAdapter.notifyDataSetChanged(); return true; case R.id.settings: startActivity(new Intent(LFMainActivity.this, SettingsActivity.class)); return true; case R.id.hideAlbumButton: final AlertDialog.Builder hideDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialogsHelper.getTextDialog(LFMainActivity.this, hideDialogBuilder, hidden ? R.string.unhide : R.string.hide, hidden ? R.string.unhide_album_message : R.string.hide_album_message, null); hideDialogBuilder.setPositiveButton(getString(hidden ? R.string.unhide : R.string.hide).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if (albumsMode) { if (hidden) getAlbums().unHideSelectedAlbums(getApplicationContext()); else getAlbums().hideSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { if (hidden) getAlbums().unHideAlbum(getAlbum().getPath(), getApplicationContext()); else getAlbums().hideAlbum(getAlbum().getPath(), getApplicationContext()); displayAlbums(true); } } }); if (!hidden) { hideDialogBuilder.setNeutralButton(this.getString(R.string.exclude).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { if (albumsMode) { getAlbums().excludeSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { customAlbumsHelper.excludeAlbum(getAlbum().getPath()); displayAlbums(true); } } }); } hideDialogBuilder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog alertDialog = hideDialogBuilder.create(); alertDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE, DialogInterface.BUTTON_NEUTRAL}, getAccentColor(), alertDialog); return true; case R.id.delete_action: getNavigationBar(); class DeletePhotos extends AsyncTask<String, Integer, Boolean> { private boolean succ = false; @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); super.onPreExecute(); } @Override protected Boolean doInBackground(String... 
arg0) { //if in album mode, delete selected albums if (albumsMode) succ = getAlbums().deleteSelectedAlbums(LFMainActivity.this); else { // if in selection mode, delete selected media if (editMode && !all_photos && !fav_photos) succ = getAlbum().deleteSelectedMedia(getApplicationContext()); else if (all_photos && !fav_photos) { for (Media media : selectedMedias) { String[] projection = {MediaStore.Images.Media._ID}; // Match on the file path String selection = MediaStore.Images.Media.DATA + " = ?"; String[] selectionArgs = new String[]{media.getPath()}; // Query for the ID of the media matching the file path Uri queryUri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI; ContentResolver contentResolver = getContentResolver(); Cursor c = contentResolver.query(queryUri, projection, selection, selectionArgs, null); if (c.moveToFirst()) { // We found the ID. Deleting the item via the content provider will also remove the file long id = c.getLong(c.getColumnIndexOrThrow(MediaStore.Images.Media._ID)); Uri deleteUri = ContentUris .withAppendedId(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, id); contentResolver.delete(deleteUri, null, null); succ = true; } else { succ = false; // File not found in media store DB } c.close(); } } else if (fav_photos && !all_photos) { realm = Realm.getDefaultInstance(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { for (int i = 0; i < selectedMedias.size(); i++) { RealmResults<FavouriteImagesModel> favouriteImagesModels = realm.where (FavouriteImagesModel.class).equalTo("path", selectedMedias.get (i).getPath()).findAll(); favouriteImagesModels.deleteAllFromRealm(); } } }); succ = true; } // if not in selection mode, delete current album entirely else if (!editMode) { succ = getAlbums().deleteAlbum(getAlbum(), getApplicationContext()); getAlbum().getMedia().clear(); } } return succ; } @Override protected void onPostExecute(Boolean result) { if (result) { // in albumsMode, the selected albums have been deleted. if (albumsMode) { getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos && !fav_photos) { //if all media in current album have been deleted, delete current album too. if (getAlbum().getMedia().size() == 0) { getAlbums().removeCurrentAlbum(); albumsAdapter.notifyDataSetChanged(); displayAlbums(); swipeRefreshLayout.setRefreshing(true); } else mediaAdapter.swapDataSet(getAlbum().getMedia()); } else if (all_photos && !fav_photos) { clearSelectedPhotos(); listAll = StorageProvider.getAllShownImages(LFMainActivity.this); media = listAll; size = listAll.size(); Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings .getSortingMode(), getAlbum().settings.getSortingOrder())); mediaAdapter.swapDataSet(listAll); } else if (fav_photos && !all_photos) { clearSelectedPhotos(); getfavouriteslist(); new FavouritePhotos().execute(); } } } else requestSdCardPermissions(); invalidateOptionsMenu(); checkNothing(); swipeRefreshLayout.setRefreshing(false); } } AlertDialog.Builder deleteDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialogsHelper.getTextDialog(this, deleteDialog, R.string.delete, albumsMode || !editMode ? 
R.string.delete_album_message : R.string.delete_photos_message, null); deleteDialog.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); deleteDialog.setPositiveButton(this.getString(R.string.delete).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if (securityObj.isActiveSecurity() && securityObj.isPasswordOnDelete()) { final boolean passco[] = {false}; AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog on wrong password } }); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { // if password is correct, call DeletePhotos and perform deletion if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); new DeletePhotos().execute(); } // if password is incorrect, don't delete and notify user of incorrect password else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else new DeletePhotos().execute(); } }); AlertDialog alertDialogDelete = deleteDialog.create(); alertDialogDelete.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialogDelete); return true; case R.id.excludeAlbumButton: final AlertDialog.Builder excludeDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final View excludeDialogLayout = getLayoutInflater().inflate(R.layout.dialog_exclude, null); TextView textViewExcludeTitle = (TextView) excludeDialogLayout.findViewById(R.id.text_dialog_title); TextView textViewExcludeMessage = (TextView) excludeDialogLayout.findViewById(R.id.text_dialog_message); final Spinner spinnerParents = (Spinner) excludeDialogLayout.findViewById(R.id.parents_folder); spinnerParents.getBackground().setColorFilter(getIconColor(), PorterDuff.Mode.SRC_ATOP); ((CardView) 
excludeDialogLayout.findViewById(R.id.message_card)).setCardBackgroundColor(getCardBackgroundColor()); textViewExcludeTitle.setBackgroundColor(getPrimaryColor()); textViewExcludeTitle.setText(getString(R.string.exclude)); if ((albumsMode && getAlbums().getSelectedCount() > 1)) { textViewExcludeMessage.setText(R.string.exclude_albums_message); spinnerParents.setVisibility(View.GONE); } else { textViewExcludeMessage.setText(R.string.exclude_album_message); spinnerParents.setAdapter(getSpinnerAdapter(albumsMode ? getAlbums().getSelectedAlbum(0).getParentsFolders() : getAlbum().getParentsFolders())); } textViewExcludeMessage.setTextColor(getTextColor()); excludeDialogBuilder.setView(excludeDialogLayout); excludeDialogBuilder.setPositiveButton(this.getString(R.string.exclude).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if ((albumsMode && getAlbums().getSelectedCount() > 1)) { getAlbums().excludeSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { customAlbumsHelper.excludeAlbum(spinnerParents.getSelectedItem().toString()); finishEditMode(); displayAlbums(true); } } }); excludeDialogBuilder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog alertDialogExclude = excludeDialogBuilder.create(); alertDialogExclude.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialogExclude); return true; case R.id.zipAlbumButton: path = new ArrayList<>(); File folder = new File(getAlbums().getSelectedAlbum(0).getPath() + "/"); File[] fpath = folder.listFiles(); for (int i = 0; i < fpath.length; i++) { if (fpath[i].getPath().endsWith(".jpg")) { path.add(fpath[i].getPath()); } } new ZipAlbumTask().execute(); return true; case R.id.sharePhotos: Intent intent = new Intent(); intent.setAction(Intent.ACTION_SEND_MULTIPLE); intent.putExtra(Intent.EXTRA_SUBJECT, getString(R.string.sent_to_action)); // list of all selected media in current album ArrayList<Uri> files = new ArrayList<Uri>(); if (!all_photos && !fav_photos) { for (Media f : getAlbum().getSelectedMedia()) files.add(f.getUri()); } else if (all_photos && !fav_photos) { for (Media f : selectedMedias) files.add(f.getUri()); } else if (fav_photos && !all_photos) { for (Media m : selectedMedias) { files.add(m.getUri()); } } if (!all_photos && !fav_photos) { for (Media f : getAlbum().getSelectedMedia()) { Realm realm = Realm.getDefaultInstance(); realm.beginTransaction(); UploadHistoryRealmModel uploadHistory; uploadHistory = realm.createObject(UploadHistoryRealmModel.class); uploadHistory.setName("OTHERS"); uploadHistory.setPathname(f.getPath()); uploadHistory.setDatetime(new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date())); uploadHistory.setStatus(getString(R.string.upload_done)); realm.commitTransaction(); Intent result = new Intent(); result.putExtra(Constants.SHARE_RESULT, 0); setResult(RESULT_OK, result); } } else if (all_photos || fav_photos) { for (Media m : selectedMedias) { Realm realm = Realm.getDefaultInstance(); realm.beginTransaction(); UploadHistoryRealmModel uploadHistory; uploadHistory = realm.createObject(UploadHistoryRealmModel.class); uploadHistory.setName("OTHERS"); uploadHistory.setPathname(m.getPath()); uploadHistory.setDatetime(new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date())); uploadHistory.setStatus(getString(R.string.upload_done)); realm.commitTransaction(); Intent 
result = new Intent(); result.putExtra(Constants.SHARE_RESULT, 0); setResult(RESULT_OK, result); } } String extension = files.get(0).getPath().substring(files.get(0).getPath().lastIndexOf('.') + 1); String mimeType = MimeTypeMap.getSingleton().getMimeTypeFromExtension(extension); intent.putParcelableArrayListExtra(Intent.EXTRA_STREAM, files); if (!all_photos && !fav_photos) intent.setType(StringUtils.getGenericMIME(getAlbum().getSelectedMedia(0).getMimeType())); else if (all_photos && !fav_photos) intent.setType(mimeType); else if (fav_photos && !all_photos) intent.setType(mimeType); finishEditMode(); startActivity(Intent.createChooser(intent, getResources().getText(R.string.send_to))); return true; case R.id.name_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(NAME); new SortingUtilsAlbums().execute(); } else { new SortModeSet().execute(NAME); if (!all_photos && !fav_photos) { new SortingUtilsPhtots().execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll().execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist().execute(); } } item.setChecked(true); return true; case R.id.date_taken_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(DATE); new SortingUtilsAlbums().execute(); } else { new SortModeSet().execute(DATE); if (!all_photos && !fav_photos) { new SortingUtilsPhtots().execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll().execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist().execute(); } } item.setChecked(true); return true; case R.id.size_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(SIZE); new SortingUtilsAlbums().execute(); } else { new SortModeSet().execute(SIZE); if (!all_photos && !fav_photos) { new SortingUtilsPhtots().execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll().execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist().execute(); } } item.setChecked(true); return true; case R.id.numeric_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(NUMERIC); new SortingUtilsAlbums().execute(); } else { new SortModeSet().execute(NUMERIC); if (!all_photos && !fav_photos) { new SortingUtilsPhtots().execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll().execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist().execute(); } } item.setChecked(true); return true; case R.id.ascending_sort_action: if (albumsMode) { getAlbums().setDefaultSortingAscending(item.isChecked() ? SortingOrder.DESCENDING : SortingOrder.ASCENDING); new SortingUtilsAlbums().execute(); } else { getAlbum().setDefaultSortingAscending(getApplicationContext(), item.isChecked() ? 
SortingOrder.DESCENDING : SortingOrder.ASCENDING); if (!all_photos && !fav_photos) { new SortingUtilsPhtots().execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll().execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist().execute(); } } item.setChecked(!item.isChecked()); return true; //region Affix case R.id.affixPhoto: //region Async MediaAffix class affixMedia extends AsyncTask<Affix.Options, Integer, Void> { private AlertDialog dialog; @Override protected void onPreExecute() { AlertDialog.Builder progressDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); dialog = AlertDialogsHelper.getProgressDialog(LFMainActivity.this, progressDialog, getString(R.string.affix), getString(R.string.affix_text)); dialog.show(); super.onPreExecute(); } @Override protected Void doInBackground(Affix.Options... arg0) { ArrayList<Bitmap> bitmapArray = new ArrayList<Bitmap>(); if (!all_photos) { for (int i = 0; i < getAlbum().getSelectedCount(); i++) { bitmapArray.add(getBitmap(getAlbum().getSelectedMedia(i).getPath())); } } else { for (int i = 0; i < selectedMedias.size(); i++) { bitmapArray.add(getBitmap(selectedMedias.get(i).getPath())); } } if (bitmapArray.size() > 1) Affix.AffixBitmapList(getApplicationContext(), bitmapArray, arg0[0]); else runOnUiThread(new Runnable() { @Override public void run() { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.affix_error), navigationView.getHeight()); } }); return null; } @Override protected void onPostExecute(Void result) { editMode = false; if (!all_photos) getAlbum().clearSelectedPhotos(); else clearSelectedPhotos(); dialog.dismiss(); invalidateOptionsMenu(); mediaAdapter.notifyDataSetChanged(); if (!all_photos) new PreparePhotosTask().execute(); else clearSelectedPhotos(); } } //endregion final AlertDialog.Builder builder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final View dialogLayout = getLayoutInflater().inflate(R.layout.dialog_affix, null); dialogLayout.findViewById(R.id.affix_title).setBackgroundColor(getPrimaryColor()); ((CardView) dialogLayout.findViewById(R.id.affix_card)).setCardBackgroundColor(getCardBackgroundColor()); //ITEMS final SwitchCompat swVertical = (SwitchCompat) dialogLayout.findViewById(R.id.affix_vertical_switch); final SwitchCompat swSaveHere = (SwitchCompat) dialogLayout.findViewById(R.id.save_here_switch); final RadioGroup radioFormatGroup = (RadioGroup) dialogLayout.findViewById(R.id.radio_format); final TextView txtQuality = (TextView) dialogLayout.findViewById(R.id.affix_quality_title); final SeekBar seekQuality = (SeekBar) dialogLayout.findViewById(R.id.seek_bar_quality); //region THEME STUFF setScrollViewColor((ScrollView) dialogLayout.findViewById(R.id.affix_scrollView)); /** TextViews **/ int color = getTextColor(); ((TextView) dialogLayout.findViewById(R.id.affix_vertical_title)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.compression_settings_title)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.save_here_title)).setTextColor(color); /** Sub TextViews **/ color = getTextColor(); ((TextView) dialogLayout.findViewById(R.id.save_here_sub)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.affix_vertical_sub)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.affix_format_sub)).setTextColor(color); txtQuality.setTextColor(color); /** Icons **/ color = getIconColor(); ((IconicsImageView) 
dialogLayout.findViewById(R.id.affix_quality_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.affix_format_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.affix_vertical_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.save_here_icon)).setColor(color); seekQuality.getProgressDrawable().setColorFilter(new PorterDuffColorFilter(getAccentColor(), PorterDuff.Mode.SRC_IN)); seekQuality.getThumb().setColorFilter(new PorterDuffColorFilter(getAccentColor(), PorterDuff.Mode.SRC_IN)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_jpeg)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_png)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_webp)); updateSwitchColor(swVertical, getAccentColor()); updateSwitchColor(swSaveHere, getAccentColor()); //endregion seekQuality.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() { @Override public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) { txtQuality.setText(Html.fromHtml( String.format(Locale.getDefault(), "%s <b>%d</b>", getString(R.string.quality), progress))); } @Override public void onStartTrackingTouch(SeekBar seekBar) { } @Override public void onStopTrackingTouch(SeekBar seekBar) { } }); seekQuality.setProgress(90); //DEFAULT swVertical.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { updateSwitchColor(swVertical, getAccentColor()); } }); swSaveHere.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { updateSwitchColor(swSaveHere, getAccentColor()); } }); builder.setView(dialogLayout); builder.setPositiveButton(this.getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { Bitmap.CompressFormat compressFormat; switch (radioFormatGroup.getCheckedRadioButtonId()) { case R.id.radio_jpeg: default: compressFormat = Bitmap.CompressFormat.JPEG; break; case R.id.radio_png: compressFormat = Bitmap.CompressFormat.PNG; break; case R.id.radio_webp: compressFormat = Bitmap.CompressFormat.WEBP; break; } Affix.Options options = new Affix.Options( swSaveHere.isChecked() ? 
getAlbum().getPath() : Affix.getDefaultDirectoryPath(), compressFormat, seekQuality.getProgress(), swVertical.isChecked()); new affixMedia().execute(options); } }); builder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog affixDialog = builder.create(); affixDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), affixDialog); return true; //endregion case R.id.action_move: bottomSheetDialogFragment = new SelectAlbumBottomSheet(); bottomSheetDialogFragment.setTitle(getString(R.string.move_to)); if (!albumsMode) { bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(final String path) { swipeRefreshLayout.setRefreshing(true); int numberOfImagesMoved; if ((numberOfImagesMoved = getAlbum().moveSelectedMedia(getApplicationContext(), path)) > 0) { if (getAlbum().getMedia().size() == 0) { getAlbums().removeCurrentAlbum(); albumsAdapter.notifyDataSetChanged(); displayAlbums(); } mediaAdapter.swapDataSet(getAlbum().getMedia()); finishEditMode(); invalidateOptionsMenu(); if (numberOfImagesMoved > 1) SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.photos_moved_successfully), navigationView.getHeight()); else SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.photo_moved_successfully), navigationView.getHeight()); } else if (numberOfImagesMoved == -1 && getAlbum().getPath().equals(path)) { //moving to the same folder AlertDialog.Builder alertDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); alertDialog.setCancelable(false); AlertDialogsHelper.getTextDialog(LFMainActivity.this, alertDialog, R.string.move_to, R.string.move, null); alertDialog.setNeutralButton(getString(R.string.make_copies).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { new CopyPhotos(path, true, false).execute(); } }); alertDialog.setPositiveButton(getString(R.string.cancel).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); alertDialog.setNegativeButton(getString(R.string.replace).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int id) { finishEditMode(); invalidateOptionsMenu(); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.photo_moved_successfully), navigationView.getHeight()); } }); AlertDialog alert = alertDialog.create(); alert.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE, DialogInterface.BUTTON_NEUTRAL}, getAccentColor(), alert); } else requestSdCardPermissions(); swipeRefreshLayout.setRefreshing(false); bottomSheetDialogFragment.dismiss(); } }); bottomSheetDialogFragment.show(getSupportFragmentManager(), bottomSheetDialogFragment.getTag()); } else { AlertDialog.Builder alertDialogMoveAll = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); alertDialogMoveAll.setCancelable(false); AlertDialogsHelper.getTextDialog(LFMainActivity.this, alertDialogMoveAll, R.string.move_to, R.string.move_all_photos, null); alertDialogMoveAll.setPositiveButton(R.string.ok_action, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { bottomSheetDialogFragment.show(getSupportFragmentManager(), 
bottomSheetDialogFragment.getTag()); } }); alertDialogMoveAll.setNegativeButton(getString(R.string.cancel).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { dialog.cancel(); } }); bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(String path) { swipeRefreshLayout.setRefreshing(true); if (getAlbums().moveSelectedAlbum(LFMainActivity.this, path)) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.moved_target_folder_success), SnackBarHandler.LONG); getAlbums().deleteSelectedAlbums(LFMainActivity.this); getAlbums().clearSelectedAlbums(); new PrepareAlbumTask().execute(); } else { requestSdCardPermissions(); swipeRefreshLayout.setRefreshing(false); invalidateOptionsMenu(); } bottomSheetDialogFragment.dismiss(); } }); AlertDialog dialog = alertDialogMoveAll.create(); dialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface .BUTTON_NEGATIVE}, getAccentColor(), dialog); } return true; case R.id.action_add_favourites: int count = 0; ArrayList<Media> favadd; if (!all_photos) { favadd = getAlbum().getSelectedMedia(); } else { favadd = selectedMedias; } for (int i = 0; i < favadd.size(); i++) { String realpath = favadd.get(i).getPath(); RealmQuery<FavouriteImagesModel> query = realm.where(FavouriteImagesModel.class).equalTo("path", realpath); if (query.count() == 0) { count++; realm.beginTransaction(); FavouriteImagesModel fav = realm.createObject(FavouriteImagesModel.class, realpath); ImageDescModel q = realm.where(ImageDescModel.class).equalTo("path", realpath).findFirst(); if (q != null) { fav.setDescription(q.getTitle()); } else { fav.setDescription(" "); } realm.commitTransaction(); } } finishEditMode(); if (count == 0) { SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.check_favourite_multipleitems)); } else if (count == 1) { SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.add_favourite)); } else { SnackBarHandler.show(mDrawerLayout, count + " " + getResources().getString(R.string .add_favourite_multiple)); } mediaAdapter.notifyDataSetChanged(); return true; case R.id.action_copy: bottomSheetDialogFragment = new SelectAlbumBottomSheet(); bottomSheetDialogFragment.setTitle(getString(R.string.copy_to)); bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(String path) { new CopyPhotos(path, false, true).execute(); bottomSheetDialogFragment.dismiss(); } }); bottomSheetDialogFragment.show(getSupportFragmentManager(), bottomSheetDialogFragment.getTag()); return true; case R.id.renameAlbum: AlertDialog.Builder renameDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextNewName = new EditText(getApplicationContext()); editTextNewName.setText(albumsMode ? getAlbums().getSelectedAlbum(0).getName() : getAlbum().getName()); editTextNewName.setSelectAllOnFocus(true); editTextNewName.setHint(R.string.description_hint); editTextNewName.setHintTextColor(ContextCompat.getColor(getApplicationContext(), R.color.grey)); editTextNewName.setHighlightColor(ContextCompat.getColor(getApplicationContext(), R.color.cardview_shadow_start_color)); editTextNewName.selectAll(); editTextNewName.setSingleLine(false); final String albumName = albumsMode ? 
getAlbums().getSelectedAlbum(0).getName() : getAlbum().getName(); AlertDialogsHelper.getInsertTextDialog(LFMainActivity.this, renameDialogBuilder, editTextNewName, R.string.rename_album, null); renameDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); renameDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog } }); final AlertDialog renameDialog = renameDialogBuilder.create(); renameDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_IS_FORWARD_NAVIGATION); editTextNewName.setSelection(editTextNewName.getText().toString().length()); renameDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface .BUTTON_NEGATIVE}, getAccentColor(), renameDialog); renameDialog.getButton(AlertDialog.BUTTON_POSITIVE).setEnabled(false); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, ContextCompat.getColor(LFMainActivity.this, R.color.grey), renameDialog); editTextNewName.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if (TextUtils.isEmpty(editable)) { // Disable ok button renameDialog.getButton( AlertDialog.BUTTON_POSITIVE).setEnabled(false); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, ContextCompat.getColor(LFMainActivity.this, R.color.grey), renameDialog); } else { // Something into edit text. Enable the button. 
renameDialog.getButton( AlertDialog.BUTTON_POSITIVE).setEnabled(true); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, getAccentColor(), renameDialog); } } }); renameDialog.getButton(DialogInterface.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View dialog) { boolean rename = false; if (editTextNewName.length() != 0) { swipeRefreshLayout.setRefreshing(true); boolean success = false; if (albumsMode) { if (!editTextNewName.getText().toString().equals(albumName)) { int index = getAlbums().dispAlbums.indexOf(getAlbums().getSelectedAlbum(0)); getAlbums().getAlbum(index).updatePhotos(getApplicationContext()); success = getAlbums().getAlbum(index).renameAlbum(getApplicationContext(), editTextNewName.getText().toString()); albumsAdapter.notifyItemChanged(index); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_no_change), navigationView.getHeight()); rename = true; } } else { if (!editTextNewName.getText().toString().equals(albumName)) { success = getAlbum().renameAlbum(getApplicationContext(), editTextNewName.getText().toString()); toolbar.setTitle(getAlbum().getName()); mediaAdapter.notifyDataSetChanged(); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_no_change), navigationView.getHeight()); rename = true; } } renameDialog.dismiss(); if (success) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_succes), navigationView.getHeight()); getAlbums().clearSelectedAlbums(); invalidateOptionsMenu(); } else if (!rename) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_error), navigationView.getHeight()); requestSdCardPermissions(); } swipeRefreshLayout.setRefreshing(false); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.insert_something), navigationView.getHeight()); editTextNewName.requestFocus(); } } }); return true; case R.id.clear_album_preview: if (!albumsMode) { getAlbum().removeCoverAlbum(getApplicationContext()); } return true; case R.id.setAsAlbumPreview: if (!albumsMode) { getAlbum().setSelectedPhotoAsPreview(getApplicationContext()); finishEditMode(); } return true; default: // If we got here, the user's action was not recognized. // Invoke the superclass to handle it. return super.onOptionsItemSelected(item); } } private class SortModeSet extends AsyncTask<SortingMode, Void, Void> { @Override protected Void doInBackground(SortingMode... 
sortingModes) { for (Album a : getAlbums().dispAlbums) { if (a.settings.getSortingMode().getValue() != sortingModes[0].getValue()) { a.setDefaultSortingMode(getApplicationContext(), sortingModes[0]); } } return null; } } private Bitmap getBitmap(String path) { Uri uri = Uri.fromFile(new File(path)); InputStream in = null; try { final int IMAGE_MAX_SIZE = 1200000; // 1.2MP in = getContentResolver().openInputStream(uri); // Decode image size BitmapFactory.Options o = new BitmapFactory.Options(); o.inJustDecodeBounds = true; BitmapFactory.decodeStream(in, null, o); in.close(); int scale = 1; while ((o.outWidth * o.outHeight) * (1 / Math.pow(scale, 2)) > IMAGE_MAX_SIZE) { scale++; } Bitmap bitmap = null; in = getContentResolver().openInputStream(uri); if (scale > 1) { scale--; // scale to max possible inSampleSize that still yields an image // larger than target o = new BitmapFactory.Options(); o.inSampleSize = scale; bitmap = BitmapFactory.decodeStream(in, null, o); // resize to desired dimensions int height = bitmap.getHeight(); int width = bitmap.getWidth(); double y = Math.sqrt(IMAGE_MAX_SIZE / (((double) width) / height)); double x = (y / height) * width; Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, (int) x, (int) y, true); bitmap.recycle(); bitmap = scaledBitmap; System.gc(); } else { bitmap = BitmapFactory.decodeStream(in); } in.close(); Log.d(TAG, "bitmap size - width: " + bitmap.getWidth() + ", height: " + bitmap.getHeight()); return bitmap; } catch (IOException e) { Log.e(TAG, e.getMessage(), e); return null; } } public void getNavigationBar() { if (editMode && hidenav) { showNavigationBar(); hidenav = false; } } //to copy from all photos. private boolean copyfromallphotos(Context context, String folderPath) { boolean success = false; for (Media m : selectedMedias) { try { File from = new File(m.getPath()); File to = new File(folderPath); if (success = ContentHelper.copyFile(context, from, to)) scanFile(context, new String[]{StringUtils.getPhotoPathMoved(m.getPath(), folderPath)}); } catch (Exception e) { e.printStackTrace(); } } return success; } public void scanFile(Context context, String[] path) { MediaScannerConnection.scanFile(context, path, null, null); } /** * If we are in albumsMode, make the albums recyclerView visible. If we are not, make media recyclerView visible. * * @param albumsMode it indicates whether we are in album selection mode or not */ private void toggleRecyclersVisibility(boolean albumsMode) { rvAlbums.setVisibility(albumsMode ? View.VISIBLE : View.GONE); rvMedia.setVisibility(albumsMode ? View.GONE : View.VISIBLE); nothingToShow.setVisibility(View.GONE); starImageView.setVisibility(View.GONE); if (albumsMode) fabScrollUp.hide(); //touchScrollBar.setScrollBarHidden(albumsMode); } private void tint() { if (localFolder) { defaultIcon.setColor(getPrimaryColor()); defaultText.setTextColor(getPrimaryColor()); hiddenIcon.setColor(getIconColor()); hiddenText.setTextColor(getTextColor()); } else { hiddenIcon.setColor(getPrimaryColor()); hiddenText.setTextColor(getPrimaryColor()); defaultIcon.setColor(getIconColor()); defaultText.setTextColor(getTextColor()); } } /** * handles back presses. * If we are currently in selection mode, back press will take us out of selection mode. * If we are not in selection mode but in albumsMode and the drawer is open, back press will close it. * If we are not in selection mode but in albumsMode and the drawer is closed, finish the activity. * If we are neither in selection mode nor in albumsMode, display the albums again. 
*/ @Override public void onBackPressed() { checkForReveal = true; if ((editMode && all_photos) || (editMode && fav_photos)) clearSelectedPhotos(); getNavigationBar(); if (editMode) finishEditMode(); else { if (albumsMode) { if (mDrawerLayout.isDrawerOpen(GravityCompat.START)) mDrawerLayout.closeDrawer(GravityCompat.START); else { if (doubleBackToExitPressedOnce && isTaskRoot()) finish(); else if (isTaskRoot()) { doubleBackToExitPressedOnce = true; View rootView = LFMainActivity.this.getWindow().getDecorView().findViewById(android.R.id.content); Snackbar snackbar = Snackbar .make(rootView, R.string.press_back_again_to_exit, Snackbar.LENGTH_LONG) .setAction(R.string.exit, new View.OnClickListener() { @Override public void onClick(View view) { finishAffinity(); } }) .setActionTextColor(getAccentColor()); View sbView = snackbar.getView(); final FrameLayout.LayoutParams params = (FrameLayout.LayoutParams) sbView.getLayoutParams(); params.setMargins(params.leftMargin, params.topMargin, params.rightMargin, params.bottomMargin + navigationView.getHeight()); sbView.setLayoutParams(params); snackbar.show(); new Handler().postDelayed(new Runnable() { @Override public void run() { doubleBackToExitPressedOnce = false; } }, 2000); } else super.onBackPressed(); } } else { displayAlbums(); } } } private class ZipAlbumTask extends AsyncTask<Void, Integer, Void> { @Override protected void onPreExecute() { super.onPreExecute(); NotificationHandler.make(R.string.folder, R.string.zip_fol, R.drawable.ic_archive_black_24dp); } @Override protected Void doInBackground(Void... voids) { try { double c = 0.0; BufferedInputStream origin = null; FileOutputStream dest = new FileOutputStream(getAlbums().getSelectedAlbum(0).getParentsFolders().get (1) + "/" + getAlbums().getSelectedAlbum(0).getName() + ".zip"); ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream( dest)); byte data[] = new byte[BUFFER]; for (int i = 0; i < path.size(); i++) { FileInputStream fi = new FileInputStream(path.get(i)); origin = new BufferedInputStream(fi, BUFFER); ZipEntry entry = new ZipEntry(path.get(i).substring(path.get(i).lastIndexOf("/") + 1)); out.putNextEntry(entry); c++; if ((int) ((c / size) * 100) > 100) { NotificationHandler.actionProgress((int) c, path.size(), 100, R.string.zip_operation); } else { NotificationHandler.actionProgress((int) c, path.size(), (int) ((c / path.size()) * 100), R.string .zip_operation); } int count; while ((count = origin.read(data, 0, BUFFER)) != -1) { out.write(data, 0, count); } origin.close(); } out.close(); if (isCancelled()) { return null; } } catch (Exception e) { e.printStackTrace(); } return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); NotificationHandler.actionPassed(R.string.zip_completion); String path = getAlbums().getSelectedAlbum(0).getParentsFolders().get(1) + getAlbums().getSelectedAlbum (0).getName() + ".zip"; SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.zip_location) + path); getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } } private class PrepareAlbumTask extends AsyncTask<Void, Integer, Void> { @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); toggleRecyclersVisibility(true); super.onPreExecute(); } @Override protected Void doInBackground(Void... 
arg0) { getAlbums().loadAlbums(getApplicationContext(), hidden); return null; } @Override protected void onPostExecute(Void result) { albumsAdapter.swapDataSet(getAlbums().dispAlbums); albList = new ArrayList<>(); populateAlbum(); checkNothing(); swipeRefreshLayout.setRefreshing(false); getAlbums().saveBackup(getApplicationContext()); invalidateOptionsMenu(); finishEditMode(); } } private class PreparePhotosTask extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); toggleRecyclersVisibility(false); super.onPreExecute(); } @Override protected Void doInBackground(Void... arg0) { getAlbum().updatePhotos(getApplicationContext()); return null; } @Override protected void onPostExecute(Void result) { mediaAdapter.swapDataSet(getAlbum().getMedia()); if (!hidden) HandlingAlbums.addAlbumToBackup(getApplicationContext(), getAlbum()); checkNothing(); swipeRefreshLayout.setRefreshing(false); invalidateOptionsMenu(); finishEditMode(); } } private class PrepareAllPhotos extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); toggleRecyclersVisibility(false); super.onPreExecute(); } @Override protected Void doInBackground(Void... arg0) { getAlbum().updatePhotos(getApplicationContext()); return null; } @Override protected void onPostExecute(Void result) { listAll = StorageProvider.getAllShownImages(LFMainActivity.this); size = listAll.size(); Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); mediaAdapter.swapDataSet(listAll); if (!hidden) HandlingAlbums.addAlbumToBackup(getApplicationContext(), getAlbum()); checkNothing(); swipeRefreshLayout.setRefreshing(false); invalidateOptionsMenu(); finishEditMode(); toolbar.setTitle(getString(R.string.all_media)); clearSelectedPhotos(); } } private class FavouritePhotos extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); toggleRecyclersVisibility(false); super.onPreExecute(); } @Override protected Void doInBackground(Void... arg0) { getAlbum().updatePhotos(getApplicationContext()); return null; } @Override protected void onPostExecute(Void result) { Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum() .settings.getSortingOrder())); mediaAdapter.swapDataSet(favouriteslist); checkNothingFavourites(); swipeRefreshLayout.setRefreshing(false); invalidateOptionsMenu(); finishEditMode(); toolbar.setTitle(getResources().getString(R.string.favourite_title)); clearSelectedPhotos(); } } /* Async Class for Sorting Photos - NOT listAll */ private class SortingUtilsPhtots extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... aVoid) { getAlbum().sortPhotos(); return null; } protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); swipeRefreshLayout.setRefreshing(false); mediaAdapter.swapDataSet(getAlbum().getMedia()); } } /* Async Class for Sorting Photos - listAll */ private class SortingUtilsListAll extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... 
aVoid) { Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); swipeRefreshLayout.setRefreshing(false); mediaAdapter.swapDataSet(listAll); } } /* Async Class for Sorting Favourites */ private class SortingUtilsFavouritelist extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... aVoid) { Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum() .settings.getSortingOrder())); return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); swipeRefreshLayout.setRefreshing(false); mediaAdapter.swapDataSet(favouriteslist); } } /* Async Class for Sorting Albums */ private class SortingUtilsAlbums extends AsyncTask<Void, Void, Void> { @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... aVoid) { getAlbums().sortAlbums(getApplicationContext()); return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); swipeRefreshLayout.setRefreshing(false); albumsAdapter.swapDataSet(getAlbums().dispAlbums); new PrepareAlbumTask().execute(); } } /* Async Class for coping images */ private class CopyPhotos extends AsyncTask<String, Integer, Boolean> { private String path; private Boolean moveAction, copyAction, success; CopyPhotos(String path, Boolean moveAction, Boolean copyAction) { this.path = path; this.moveAction = moveAction; this.copyAction = copyAction; } @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); super.onPreExecute(); } @Override protected Boolean doInBackground(String... arg0) { if (!all_photos) { success = getAlbum().copySelectedPhotos(getApplicationContext(), path); MediaStoreProvider.getAlbums(LFMainActivity.this); getAlbum().updatePhotos(getApplicationContext()); } else { success = copyfromallphotos(getApplicationContext(), path); } return success; } @Override protected void onPostExecute(Boolean result) { if (result) { if (!all_photos) { mediaAdapter.swapDataSet(getAlbum().getMedia()); } else { mediaAdapter.swapDataSet(listAll); } mediaAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); swipeRefreshLayout.setRefreshing(false); finishEditMode(); if (moveAction) SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.photos_moved_successfully), navigationView.getHeight()); else if (copyAction) SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.copied_successfully), navigationView.getHeight()); } else requestSdCardPermissions(); } } }
1
12,232
Can you remove it from the menu instead of setting it hidden? Or is there any other trouble?
fossasia-phimpme-android
java
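The review comment above contrasts hiding a menu item with removing it. A minimal sketch of the suggested approach, written as a fragment of an Activity and using a hypothetical `R.id.action_example` id that is not taken from this PR:

```java
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
    // Instead of menu.findItem(R.id.action_example).setVisible(false), which
    // only hides the entry and lets another code path make it visible again,
    // drop it from the Menu altogether so nothing can re-enable it by accident.
    menu.removeItem(R.id.action_example);
    return super.onPrepareOptionsMenu(menu);
}
```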
@@ -32,6 +32,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + execution "github.com/temporalio/temporal/.gen/proto/execution" persistence "github.com/temporalio/temporal/common/persistence" )
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // Code generated by MockGen. DO NOT EDIT. // Source: conflictResolver.go // Package history is a generated GoMock package. package history import ( reflect "reflect" gomock "github.com/golang/mock/gomock" persistence "github.com/temporalio/temporal/common/persistence" ) // MockconflictResolver is a mock of conflictResolver interface. type MockconflictResolver struct { ctrl *gomock.Controller recorder *MockconflictResolverMockRecorder } // MockconflictResolverMockRecorder is the mock recorder for MockconflictResolver. type MockconflictResolverMockRecorder struct { mock *MockconflictResolver } // NewMockconflictResolver creates a new mock instance. func NewMockconflictResolver(ctrl *gomock.Controller) *MockconflictResolver { mock := &MockconflictResolver{ctrl: ctrl} mock.recorder = &MockconflictResolverMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockconflictResolver) EXPECT() *MockconflictResolverMockRecorder { return m.recorder } // reset mocks base method. func (m *MockconflictResolver) reset(prevRunID string, prevLastWriteVersion int64, prevState int, requestID string, replayEventID int64, info *persistence.WorkflowExecutionInfo, updateCondition int64) (mutableState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "reset", prevRunID, prevLastWriteVersion, prevState, requestID, replayEventID, info, updateCondition) ret0, _ := ret[0].(mutableState) ret1, _ := ret[1].(error) return ret0, ret1 } // reset indicates an expected call of reset. func (mr *MockconflictResolverMockRecorder) reset(prevRunID, prevLastWriteVersion, prevState, requestID, replayEventID, info, updateCondition interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "reset", reflect.TypeOf((*MockconflictResolver)(nil).reset), prevRunID, prevLastWriteVersion, prevState, requestID, replayEventID, info, updateCondition) }
1
9,566
And for "execution ".
temporalio-temporal
go
@@ -34,6 +34,8 @@ module Bolt search_dirs << mod.plugins if mod.plugins? search_dirs << mod.pluginfacts if mod.pluginfacts? search_dirs << mod.files if mod.files? + type_files = "#{mod.path}/types" + search_dirs << type_files if File.exist?(type_files) search_dirs end end
1
# frozen_string_literal: true require 'base64' require 'bolt/apply_result' require 'bolt/apply_target' require 'bolt/config' require 'bolt/error' require 'bolt/task' require 'bolt/util/puppet_log_level' require 'find' require 'json' require 'logging' require 'open3' module Bolt class Applicator def initialize(inventory, executor, modulepath, plugin_dirs, pdb_client, hiera_config, max_compiles, apply_settings) # lazy-load expensive gem code require 'concurrent' @inventory = inventory @executor = executor @modulepath = modulepath @plugin_dirs = plugin_dirs @pdb_client = pdb_client @hiera_config = hiera_config ? validate_hiera_config(hiera_config) : nil @apply_settings = apply_settings || {} @pool = Concurrent::ThreadPoolExecutor.new(max_threads: max_compiles) @logger = Logging.logger[self] @plugin_tarball = Concurrent::Delay.new do build_plugin_tarball do |mod| search_dirs = [] search_dirs << mod.plugins if mod.plugins? search_dirs << mod.pluginfacts if mod.pluginfacts? search_dirs << mod.files if mod.files? search_dirs end end end private def libexec @libexec ||= File.join(Gem::Specification.find_by_name('bolt').gem_dir, 'libexec') end def custom_facts_task @custom_facts_task ||= begin path = File.join(libexec, 'custom_facts.rb') file = { 'name' => 'custom_facts.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin', 'implementations' => [ { 'name' => 'custom_facts.rb' }, { 'name' => 'custom_facts.rb', 'remote' => true } ] } Bolt::Task.new('apply_helpers::custom_facts', metadata, [file]) end end def catalog_apply_task @catalog_apply_task ||= begin path = File.join(libexec, 'apply_catalog.rb') file = { 'name' => 'apply_catalog.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin', 'implementations' => [ { 'name' => 'apply_catalog.rb' }, { 'name' => 'apply_catalog.rb', 'remote' => true } ] } Bolt::Task.new('apply_helpers::apply_catalog', metadata, [file]) end end def query_resources_task @query_resources_task ||= begin path = File.join(libexec, 'query_resources.rb') file = { 'name' => 'query_resources.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin', 'implementations' => [ { 'name' => 'query_resources.rb' }, { 'name' => 'query_resources.rb', 'remote' => true } ] } Bolt::Task.new('apply_helpers::query_resources', metadata, [file]) end end def compile(target, catalog_input) # This simplified Puppet node object is what .local uses to determine the # certname of the target node = Puppet::Node.from_data_hash('name' => target.name, 'parameters' => { 'clientcert' => target.name }) trusted = Puppet::Context::TrustedInformation.local(node) catalog_input[:target] = { name: target.name, facts: @inventory.facts(target).merge('bolt' => true), variables: @inventory.vars(target), trusted: trusted.to_h } bolt_catalog_exe = File.join(libexec, 'bolt_catalog') old_path = ENV['PATH'] ENV['PATH'] = "#{RbConfig::CONFIG['bindir']}#{File::PATH_SEPARATOR}#{old_path}" out, err, stat = Open3.capture3('ruby', bolt_catalog_exe, 'compile', stdin_data: catalog_input.to_json) ENV['PATH'] = old_path # Any messages logged by Puppet will be on stderr as JSON hashes, so we # parse those and store them here. Any message on stderr that is not # properly JSON formatted is assumed to be an error message. If # compilation was successful, we print the logs as they may include # important warnings. 
If compilation failed, we don't print the logs as # they are likely redundant with the error that caused the failure, which # will be handled separately. logs = err.lines.map do |line| JSON.parse(line) rescue JSON::ParserError { 'level' => 'err', 'message' => line } end result = JSON.parse(out) if stat.success? logs.each do |log| bolt_level = Bolt::Util::PuppetLogLevel::MAPPING[log['level'].to_sym] message = log['message'].chomp @logger.send(bolt_level, "#{target.name}: #{message}") end result else raise ApplyError.new(target.name, result['message']) end end def validate_hiera_config(hiera_config) if File.exist?(File.path(hiera_config)) data = File.open(File.path(hiera_config), "r:UTF-8") { |f| YAML.safe_load(f.read, [Symbol]) } if data.nil? return nil elsif data['version'] != 5 raise Bolt::ParseError, "Hiera v5 is required, found v#{data['version'] || 3} in #{hiera_config}" end hiera_config end end def apply(args, apply_body, scope) raise(ArgumentError, 'apply requires a TargetSpec') if args.empty? type0 = Puppet.lookup(:pal_script_compiler).type('TargetSpec') Puppet::Pal.assert_type(type0, args[0], 'apply targets') @executor.report_function_call('apply') options = {} if args.count > 1 type1 = Puppet.lookup(:pal_script_compiler).type('Hash[String, Data]') Puppet::Pal.assert_type(type1, args[1], 'apply options') options = args[1].transform_keys { |k| k.sub(/^_/, '').to_sym } end plan_vars = scope.to_hash(true, true) targets = @inventory.get_targets(args[0]) apply_ast(apply_body, targets, options, plan_vars) end # Count the number of top-level statements in the AST. def count_statements(ast) case ast when Puppet::Pops::Model::Program count_statements(ast.body) when Puppet::Pops::Model::BlockExpression ast.statements.count else 1 end end def apply_ast(raw_ast, targets, options, plan_vars = {}) ast = Puppet::Pops::Serialization::ToDataConverter.convert(raw_ast, rich_data: true, symbol_to_string: true) # Serialize as pcore for *Result* objects plan_vars = Puppet::Pops::Serialization::ToDataConverter.convert(plan_vars, rich_data: true, symbol_as_string: true, type_by_reference: true, local_reference: true) scope = { code_ast: ast, modulepath: @modulepath, pdb_config: @pdb_client.config.to_hash, hiera_config: @hiera_config, plan_vars: plan_vars, # This data isn't available on the target config hash config: @inventory.transport_data_get } description = options[:description] || 'apply catalog' r = @executor.log_action(description, targets) do futures = targets.map do |target| Concurrent::Future.execute(executor: @pool) do @executor.with_node_logging("Compiling manifest block", [target]) do compile(target, scope) end end end result_promises = targets.zip(futures).flat_map do |target, future| @executor.queue_execute([target]) do |transport, batch| @executor.with_node_logging("Applying manifest block", batch) do catalog = future.value if future.rejected? 
batch.map do |batch_target| # If an unhandled exception occurred, wrap it in an ApplyError error = if future.reason.is_a?(Bolt::ApplyError) future.reason else Bolt::ApplyError.new(batch_target, future.reason.message) end result = Bolt::ApplyResult.new(batch_target, error: error.to_h) @executor.publish_event(type: :node_result, result: result) result end else arguments = { 'catalog' => Puppet::Pops::Types::PSensitiveType::Sensitive.new(catalog), 'plugins' => Puppet::Pops::Types::PSensitiveType::Sensitive.new(plugins), 'apply_settings' => @apply_settings, '_task' => catalog_apply_task.name, '_noop' => options[:noop] } callback = proc do |event| if event[:type] == :node_result event = event.merge(result: ApplyResult.from_task_result(event[:result])) end @executor.publish_event(event) end # Respect the run_as default set on the executor options[:run_as] = @executor.run_as if @executor.run_as && !options.key?(:run_as) results = transport.batch_task(batch, catalog_apply_task, arguments, options, &callback) Array(results).map { |result| ApplyResult.from_task_result(result) } end end end end @executor.await_results(result_promises) end # Allow for report to exclude event metrics (apply_result doesn't require it to be present) resource_counts = r.ok_set.map { |result| result.event_metrics&.fetch('total') }.compact @executor.report_apply(count_statements(raw_ast), resource_counts) if !r.ok && !options[:catch_errors] raise Bolt::ApplyFailure, r end r end def plugins @plugin_tarball.value || raise(Bolt::Error.new("Failed to pack module plugins: #{@plugin_tarball.reason}", 'bolt/plugin-error')) end def build_plugin_tarball # lazy-load expensive gem code require 'minitar' require 'zlib' start_time = Time.now sio = StringIO.new output = Minitar::Output.new(Zlib::GzipWriter.new(sio)) Puppet.lookup(:current_environment).override_with(modulepath: @plugin_dirs).modules.each do |mod| search_dirs = yield mod parent = Pathname.new(mod.path).parent files = Find.find(*search_dirs).select { |file| File.file?(file) } files.each do |file| tar_path = Pathname.new(file).relative_path_from(parent) @logger.debug("Packing plugin #{file} to #{tar_path}") stat = File.stat(file) content = File.binread(file) output.tar.add_file_simple( tar_path.to_s, data: content, size: content.size, mode: stat.mode & 0o777, mtime: stat.mtime ) end end duration = Time.now - start_time @logger.debug("Packed plugins in #{duration * 1000} ms") output.close Base64.encode64(sio.string) ensure output&.close end end end
1
14,769
Do agents usually download these from puppetserver when applying a catalog?
puppetlabs-bolt
rb
@@ -68,6 +68,14 @@ PONY_API int pony_asio_event_fd(asio_event_t* ev) return ev->fd; } +PONY_API bool pony_asio_event_get_disposable(asio_event_t* ev) +{ + if(ev == NULL) + return false; + + return (ev->flags & ASIO_DISPOSABLE) != 0; +} + PONY_API bool pony_asio_event_get_writeable(asio_event_t* ev) { if(ev == NULL)
1
#include "event.h" #include "asio.h" #include "../actor/actor.h" #include "../mem/pool.h" #include "../sched/scheduler.h" #include "ponyassert.h" #include <string.h> PONY_API asio_event_t* pony_asio_event_create(pony_actor_t* owner, int fd, uint32_t flags, uint64_t nsec, bool noisy) { if((flags == ASIO_DISPOSABLE) || (flags == ASIO_DESTROYED)) return NULL; pony_type_t* type = *(pony_type_t**)owner; uint32_t msg_id = type->event_notify; if(msg_id == (uint32_t)-1) return NULL; asio_event_t* ev = POOL_ALLOC(asio_event_t); ev->magic = ev; ev->owner = owner; ev->msg_id = msg_id; ev->fd = fd; ev->flags = flags; ev->noisy = noisy; ev->nsec = nsec; ev->writeable = false; ev->readable = false; // The event is effectively being sent to another thread, so mark it here. pony_ctx_t* ctx = pony_ctx(); pony_gc_send(ctx); pony_traceknown(ctx, owner, type, PONY_TRACE_OPAQUE); pony_send_done(ctx); pony_asio_event_subscribe(ev); return ev; } PONY_API void pony_asio_event_destroy(asio_event_t* ev) { if((ev == NULL) || (ev->magic != ev) || (ev->flags != ASIO_DISPOSABLE)) { pony_assert(0); return; } ev->flags = ASIO_DESTROYED; // When we let go of an event, we treat it as if we had received it back from // the asio thread. pony_ctx_t* ctx = pony_ctx(); pony_gc_recv(ctx); pony_traceunknown(ctx, ev->owner, PONY_TRACE_OPAQUE); pony_recv_done(ctx); POOL_FREE(asio_event_t, ev); } PONY_API int pony_asio_event_fd(asio_event_t* ev) { if(ev == NULL) return -1; return ev->fd; } PONY_API bool pony_asio_event_get_writeable(asio_event_t* ev) { if(ev == NULL) return false; return ev->writeable; } PONY_API void pony_asio_event_set_writeable(asio_event_t* ev, bool writeable) { if(ev != NULL) ev->writeable = writeable; } PONY_API bool pony_asio_event_get_readable(asio_event_t* ev) { if(ev == NULL) return false; return ev->readable; } PONY_API void pony_asio_event_set_readable(asio_event_t* ev, bool readable) { if(ev != NULL) ev->readable = readable; } PONY_API uint64_t pony_asio_event_nsec(asio_event_t* ev) { if(ev == NULL) return 0; return ev->nsec; } PONY_API void pony_asio_event_send(asio_event_t* ev, uint32_t flags, uint32_t arg) { asio_msg_t* m = (asio_msg_t*)pony_alloc_msg(POOL_INDEX(sizeof(asio_msg_t)), ev->msg_id); m->event = ev; m->flags = flags; m->arg = arg; #ifdef PLATFORM_IS_WINDOWS // On Windows, this can be called from an IOCP callback thread, which may // not have a pony_ctx() associated with it yet. pony_register_thread(); #endif // ASIO messages technically are application messages, but since they have no // sender they aren't covered by backpressure. We pass false for an early // bailout in the backpressure code. pony_sendv(pony_ctx(), ev->owner, &m->msg, &m->msg, false); // maybe wake up a scheduler thread if they've all fallen asleep ponyint_sched_maybe_wakeup_if_all_asleep(-1); }
1
12,846
This should be `return (ev->flags == ASIO_DISPOSABLE);` because `ASIO_DISPOSABLE` has a value of `0`, as defined in the enum in `asio.h`, and because `ev->flags` is assigned directly as `ev->flags = ASIO_DISPOSABLE`, unsetting any other flags previously assigned.
ponylang-ponyc
c
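The reasoning in the review comment above is purely about the arithmetic of a zero-valued constant: `x & 0` is always `0`, so a bitwise test against `ASIO_DISPOSABLE` can never succeed. A small standalone C sketch of the pitfall; only `ASIO_DISPOSABLE = 0` is taken from the reviewer's description, the other enum values are assumed for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ASIO_DISPOSABLE = 0 per the reviewer; the non-zero values are illustrative. */
enum { ASIO_DISPOSABLE = 0, ASIO_READ = 1 << 0, ASIO_WRITE = 1 << 1 };

int main(void) {
  uint32_t flags = ASIO_DISPOSABLE;  /* direct assignment clears other bits */

  /* flags & 0 is always 0, so this test can never report true. */
  bool with_bitmask = (flags & ASIO_DISPOSABLE) != 0;

  /* Comparing the whole field works, because the state was assigned, not OR'd in. */
  bool with_equality = (flags == ASIO_DISPOSABLE);

  printf("bitmask=%d equality=%d\n", with_bitmask, with_equality);  /* prints 0 1 */
  return 0;
}
```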
@@ -68,7 +68,8 @@ namespace Examples.Console providerBuilder .AddOtlpExporter(o => { - o.MetricExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds; + o.MetricReaderType = MetricReaderType.Periodic; + o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds; o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative; }); }
1
// <copyright file="TestMetrics.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.Metrics; using System.Threading; using System.Threading.Tasks; using OpenTelemetry; using OpenTelemetry.Metrics; using OpenTelemetry.Resources; namespace Examples.Console { internal class TestMetrics { internal static object Run(MetricsOptions options) { using var meter = new Meter("TestMeter"); var providerBuilder = Sdk.CreateMeterProviderBuilder() .SetResourceBuilder(ResourceBuilder.CreateDefault().AddService("myservice")) .AddMeter(meter.Name); // All instruments from this meter are enabled. if (options.UseExporter.ToLower() == "otlp") { /* * Prerequisite to run this example: * Set up an OpenTelemetry Collector to run on local docker. * * Open a terminal window at the examples/Console/ directory and * launch the OpenTelemetry Collector with an OTLP receiver, by running: * * - On Unix based systems use: * docker run --rm -it -p 4317:4317 -v $(pwd):/cfg otel/opentelemetry-collector:0.33.0 --config=/cfg/otlp-collector-example/config.yaml * * - On Windows use: * docker run --rm -it -p 4317:4317 -v "%cd%":/cfg otel/opentelemetry-collector:0.33.0 --config=/cfg/otlp-collector-example/config.yaml * * Open another terminal window at the examples/Console/ directory and * launch the OTLP example by running: * * dotnet run metrics --useExporter otlp * * The OpenTelemetry Collector will output all received metrics to the stdout of its terminal. * */ // Adding the OtlpExporter creates a GrpcChannel. // This switch must be set before creating a GrpcChannel/HttpClient when calling an insecure gRPC service. // See: https://docs.microsoft.com/aspnet/core/grpc/troubleshoot#call-insecure-grpc-services-with-net-core-client AppContext.SetSwitch("System.Net.Http.SocketsHttpHandler.Http2UnencryptedSupport", true); providerBuilder .AddOtlpExporter(o => { o.MetricExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds; o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative; }); } else { providerBuilder .AddConsoleExporter(o => { o.MetricReaderType = MetricReaderType.Periodic; o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds; o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative; }); } using var provider = providerBuilder.Build(); Counter<int> counter = null; if (options.FlagCounter ?? true) { counter = meter.CreateCounter<int>("counter", "things", "A count of things"); } Histogram<int> histogram = null; if (options.FlagHistogram ?? false) { histogram = meter.CreateHistogram<int>("histogram"); } if (options.FlagGauge ?? 
false) { var observableCounter = meter.CreateObservableGauge("gauge", () => { return new List<Measurement<int>>() { new Measurement<int>( (int)Process.GetCurrentProcess().PrivateMemorySize64, new KeyValuePair<string, object>("tag1", "value1")), }; }); } var cts = new CancellationTokenSource(); var tasks = new List<Task>(); for (int i = 0; i < options.NumTasks; i++) { var taskno = i; tasks.Add(Task.Run(() => { System.Console.WriteLine($"Task started {taskno + 1}/{options.NumTasks}."); var loops = 0; while (!cts.IsCancellationRequested) { if (options.MaxLoops > 0 && loops >= options.MaxLoops) { break; } histogram?.Record(10); histogram?.Record( 100, new KeyValuePair<string, object>("tag1", "value1")); histogram?.Record( 200, new KeyValuePair<string, object>("tag1", "value2"), new KeyValuePair<string, object>("tag2", "value2")); histogram?.Record( 100, new KeyValuePair<string, object>("tag1", "value1")); histogram?.Record( 200, new KeyValuePair<string, object>("tag2", "value2"), new KeyValuePair<string, object>("tag1", "value2")); counter?.Add(10); counter?.Add( 100, new KeyValuePair<string, object>("tag1", "value1")); counter?.Add( 200, new KeyValuePair<string, object>("tag1", "value2"), new KeyValuePair<string, object>("tag2", "value2")); counter?.Add( 100, new KeyValuePair<string, object>("tag1", "value1")); counter?.Add( 200, new KeyValuePair<string, object>("tag2", "value2"), new KeyValuePair<string, object>("tag1", "value2")); loops++; } })); } cts.CancelAfter(options.RunTime); System.Console.WriteLine($"Wait for {options.RunTime} milliseconds."); while (!cts.IsCancellationRequested) { Task.Delay(1000).Wait(); } Task.WaitAll(tasks.ToArray()); return null; } } }
1
22,758
this is not required, right? it's the default already...
open-telemetry-opentelemetry-dotnet
.cs
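If, as the review comment above suggests, `MetricReaderType.Periodic` is already the default, the OTLP exporter block in the patch can shrink to just the interval and temporality. A sketch under that assumption, using only identifiers already present in this example:

```csharp
providerBuilder
    .AddOtlpExporter(o =>
    {
        // Assumes MetricReaderType.Periodic is the default, per the reviewer.
        o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds =
            options.DefaultCollectionPeriodMilliseconds;
        o.AggregationTemporality = options.IsDelta
            ? AggregationTemporality.Delta
            : AggregationTemporality.Cumulative;
    });
```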
@@ -49,6 +49,11 @@ const ( // to indicate that a reader has requested to read a TLF ID that // has been finalized, which isn't allowed. StatusCodeServerErrorCannotReadFinalizedTLF = 2812 + // StatusCodeServerErrorRequiredLockIsNotHeld is the error code returned by + // a MD write operation to indicate that a lockID that client required the + // write to be contingent to is not held at the time server tries to commit + // the MD, and as a result the MD is not written. + StatusCodeServerErrorRequiredLockIsNotHeld = 2813 ) // ServerError is a generic server-side error.
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package kbfsmd import ( "errors" "fmt" "strconv" "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/go-framed-msgpack-rpc/rpc" "github.com/keybase/kbfs/tlf" ) const ( // StatusCodeServerError is the error code for a generic server error. StatusCodeServerError = 2800 // StatusCodeServerErrorBadRequest is the error code for a generic client error. StatusCodeServerErrorBadRequest = 2801 // StatusCodeServerErrorConflictRevision is the error code for a revision conflict error. StatusCodeServerErrorConflictRevision = 2802 // StatusCodeServerErrorConflictPrevRoot is the error code for a PrevRoot pointer conflict error. StatusCodeServerErrorConflictPrevRoot = 2803 // StatusCodeServerErrorConflictDiskUsage is the error code for a disk usage conflict error. StatusCodeServerErrorConflictDiskUsage = 2804 // StatusCodeServerErrorLocked is the error code to indicate the folder truncation lock is locked. StatusCodeServerErrorLocked = 2805 // StatusCodeServerErrorUnauthorized is the error code to indicate the client is unauthorized to perform // a certain operation. This is also used to indicate an object isn't found. StatusCodeServerErrorUnauthorized = 2806 // StatusCodeServerErrorThrottle is the error code to indicate the client should initiate backoff. StatusCodeServerErrorThrottle = 2807 // StatusCodeServerErrorConditionFailed is the error code to indicate the write condition failed. StatusCodeServerErrorConditionFailed = 2808 // StatusCodeServerErrorWriteAccess is the error code to indicate the client isn't authorized to // write to a TLF. StatusCodeServerErrorWriteAccess = 2809 // StatusCodeServerErrorConflictFolderMapping is the error code for a folder handle to folder ID // mapping conflict error. StatusCodeServerErrorConflictFolderMapping = 2810 // StatusCodeServerErrorTooManyFoldersCreated is the error code to // indicate that the user has created more folders than their limit. StatusCodeServerErrorTooManyFoldersCreated = 2811 // StatusCodeServerErrorCannotReadFinalizedTLF is the error code // to indicate that a reader has requested to read a TLF ID that // has been finalized, which isn't allowed. StatusCodeServerErrorCannotReadFinalizedTLF = 2812 ) // ServerError is a generic server-side error. type ServerError struct { Err error } // ToStatus implements the ExportableError interface for ServerError. func (e ServerError) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerError s.Name = "SERVER_ERROR" s.Desc = e.Error() return } // Error implements the Error interface for ServerError. func (e ServerError) Error() string { if e.Err != nil { return e.Err.Error() } return "ServerError" } // ServerErrorBadRequest is a generic client-side error. type ServerErrorBadRequest struct { Reason string } // ToStatus implements the ExportableError interface for ServerErrorBadRequest. func (e ServerErrorBadRequest) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorBadRequest s.Name = "BAD_REQUEST" s.Desc = e.Reason return } // Error implements the Error interface for ServerErrorBadRequest. func (e ServerErrorBadRequest) Error() string { return fmt.Sprintf("Bad MD server request: %s", e.Reason) } // ServerErrorConflictRevision is returned when the passed MD block is inconsistent with current history. 
type ServerErrorConflictRevision struct { Desc string Expected Revision Actual Revision } // Error implements the Error interface for ServerErrorConflictRevision. func (e ServerErrorConflictRevision) Error() string { if e.Desc == "" { return fmt.Sprintf("Conflict: expected revision %d, actual %d", e.Expected, e.Actual) } return "MDServerConflictRevision{" + e.Desc + "}" } // ToStatus implements the ExportableError interface for ServerErrorConflictRevision. func (e ServerErrorConflictRevision) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorConflictRevision s.Name = "CONFLICT_REVISION" s.Desc = e.Error() return } // ServerErrorConflictPrevRoot is returned when the passed MD block is inconsistent with current history. type ServerErrorConflictPrevRoot struct { Desc string Expected ID Actual ID } // Error implements the Error interface for ServerErrorConflictPrevRoot. func (e ServerErrorConflictPrevRoot) Error() string { if e.Desc == "" { return fmt.Sprintf("Conflict: expected previous root %v, actual %v", e.Expected, e.Actual) } return "MDServerConflictPrevRoot{" + e.Desc + "}" } // ToStatus implements the ExportableError interface for ServerErrorConflictPrevRoot. func (e ServerErrorConflictPrevRoot) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorConflictPrevRoot s.Name = "CONFLICT_PREV_ROOT" s.Desc = e.Error() return } // ServerErrorConflictDiskUsage is returned when the passed MD block is inconsistent with current history. type ServerErrorConflictDiskUsage struct { Desc string Expected uint64 Actual uint64 } // ToStatus implements the ExportableError interface for ServerErrorConflictDiskUsage. func (e ServerErrorConflictDiskUsage) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorConflictDiskUsage s.Name = "CONFLICT_DISK_USAGE" s.Desc = e.Error() return } // Error implements the Error interface for ServerErrorConflictDiskUsage func (e ServerErrorConflictDiskUsage) Error() string { if e.Desc == "" { return fmt.Sprintf("Conflict: expected disk usage %d, actual %d", e.Expected, e.Actual) } return "ServerErrorConflictDiskUsage{" + e.Desc + "}" } // ServerErrorLocked is returned when the folder truncation lock is acquired by someone else. type ServerErrorLocked struct { } // Error implements the Error interface for ServerErrorLocked. func (e ServerErrorLocked) Error() string { return "ServerErrorLocked{}" } // ToStatus implements the ExportableError interface for ServerErrorLocked. func (e ServerErrorLocked) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorLocked s.Name = "LOCKED" s.Desc = e.Error() return } // ServerErrorUnauthorized is returned when a device requests a key half which doesn't belong to it. type ServerErrorUnauthorized struct { Err error } // Error implements the Error interface for ServerErrorUnauthorized. func (e ServerErrorUnauthorized) Error() string { msg := "MDServer Unauthorized" if e.Err != nil { msg += ": " + e.Err.Error() } return msg } // ToStatus implements the ExportableError interface for ServerErrorUnauthorized. func (e ServerErrorUnauthorized) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorUnauthorized s.Name = "UNAUTHORIZED" s.Desc = e.Error() return } // ServerErrorWriteAccess is returned when the client isn't authorized to // write to a TLF. type ServerErrorWriteAccess struct{} // Error implements the Error interface for ServerErrorWriteAccess. 
func (e ServerErrorWriteAccess) Error() string { return "ServerErrorWriteAccess{}" } // ToStatus implements the ExportableError interface for ServerErrorWriteAccess. func (e ServerErrorWriteAccess) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorWriteAccess s.Name = "WRITE_ACCESS" s.Desc = e.Error() return } // ServerErrorThrottle is returned when the server wants the client to backoff. type ServerErrorThrottle struct { Err error SuggestedRetryIn *time.Duration } // Error implements the Error interface for ServerErrorThrottle. func (e ServerErrorThrottle) Error() string { if e.SuggestedRetryIn == nil { return fmt.Sprintf("ServerErrorThrottle{%s}", e.Err.Error()) } return fmt.Sprintf("ServerErrorThrottle[%s]{%s}", *e.SuggestedRetryIn, e.Err.Error()) } // ToStatus implements the ExportableError interface for ServerErrorThrottle. func (e ServerErrorThrottle) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorThrottle s.Name = "THROTTLE" s.Desc = e.Err.Error() if e.SuggestedRetryIn != nil { s.Fields = append(s.Fields, keybase1.StringKVPair{ Key: "suggestedRetryInMS", Value: strconv.FormatInt(int64((*e.SuggestedRetryIn)/time.Millisecond), 10), }) } return } // ServerErrorConditionFailed is returned when a conditonal write failed. // This means there was a race and the caller should consider it a conflict. type ServerErrorConditionFailed struct { Err error ShouldThrottle bool } // Error implements the Error interface for ServerErrorConditionFailed. func (e ServerErrorConditionFailed) Error() string { return "ServerErrorConditionFailed{" + e.Err.Error() + "}" } // ToStatus implements the ExportableError interface for ServerErrorConditionFailed. func (e ServerErrorConditionFailed) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorConditionFailed s.Name = "CONDITION_FAILED" s.Desc = e.Err.Error() s.Fields = []keybase1.StringKVPair{ keybase1.StringKVPair{ Key: "ShouldThrottle", Value: strconv.FormatBool(e.ShouldThrottle), }, } return } // ServerErrorConflictFolderMapping is returned when there is a folder handle to folder // ID mapping mismatch. type ServerErrorConflictFolderMapping struct { Desc string Expected tlf.ID Actual tlf.ID } // Error implements the Error interface for ServerErrorConflictFolderMapping. func (e ServerErrorConflictFolderMapping) Error() string { if e.Desc == "" { return fmt.Sprintf("Conflict: expected folder ID %s, actual %s", e.Expected, e.Actual) } return "ServerErrorConflictFolderMapping{" + e.Desc + "}" } // ToStatus implements the ExportableError interface for ServerErrorConflictFolderMapping func (e ServerErrorConflictFolderMapping) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorConflictFolderMapping s.Name = "CONFLICT_FOLDER_MAPPING" s.Desc = e.Error() return } // ServerErrorTooManyFoldersCreated is returned when a user has created more // folders than their limit allows. type ServerErrorTooManyFoldersCreated struct { Created uint64 Limit uint64 } // Error implements the Error interface for ServerErrorTooManyFoldersCreated. func (e ServerErrorTooManyFoldersCreated) Error() string { return fmt.Sprintf("Too many folders created. 
Created: %d, limit: %d", e.Created, e.Limit) } // ToStatus implements the ExportableError interface for ServerErrorConflictFolderMapping func (e ServerErrorTooManyFoldersCreated) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorTooManyFoldersCreated s.Name = "TOO_MANY_FOLDERS_CREATED" s.Desc = e.Error() s.Fields = []keybase1.StringKVPair{ {Key: "Limit", Value: strconv.FormatUint(e.Limit, 10)}, {Key: "Created", Value: strconv.FormatUint(e.Created, 10)}, } return } // ServerErrorCannotReadFinalizedTLF is returned when the client // isn't authorized to read a finalized TLF. type ServerErrorCannotReadFinalizedTLF struct{} // Error implements the Error interface for // ServerErrorCannotReadFinalizedTLF. func (e ServerErrorCannotReadFinalizedTLF) Error() string { return "ServerErrorCannotReadFinalizedTLF{}" } // ToStatus implements the ExportableError interface for // ServerErrorCannotReadFinalizedTLF. func (e ServerErrorCannotReadFinalizedTLF) ToStatus() (s keybase1.Status) { s.Code = StatusCodeServerErrorCannotReadFinalizedTLF s.Name = "CANNOT_READ_FINALIZED_TLF" s.Desc = e.Error() return } // ServerErrorUnwrapper is an implementation of rpc.ErrorUnwrapper // for errors coming from the MDServer. type ServerErrorUnwrapper struct{} var _ rpc.ErrorUnwrapper = ServerErrorUnwrapper{} // MakeArg implements rpc.ErrorUnwrapper for ServerErrorUnwrapper. func (eu ServerErrorUnwrapper) MakeArg() interface{} { return &keybase1.Status{} } // UnwrapError implements rpc.ErrorUnwrapper for ServerErrorUnwrapper. func (eu ServerErrorUnwrapper) UnwrapError(arg interface{}) (appError error, dispatchError error) { s, ok := arg.(*keybase1.Status) if !ok { return nil, errors.New("Error converting arg to keybase1.Status object in ServerErrorUnwrapper.UnwrapError") } if s == nil || s.Code == 0 { return nil, nil } switch s.Code { case StatusCodeServerError: appError = ServerError{errors.New(s.Desc)} break case StatusCodeServerErrorBadRequest: appError = ServerErrorBadRequest{Reason: s.Desc} break case StatusCodeServerErrorConflictRevision: appError = ServerErrorConflictRevision{Desc: s.Desc} break case StatusCodeServerErrorConflictPrevRoot: appError = ServerErrorConflictPrevRoot{Desc: s.Desc} break case StatusCodeServerErrorConflictDiskUsage: appError = ServerErrorConflictDiskUsage{Desc: s.Desc} break case StatusCodeServerErrorLocked: appError = ServerErrorLocked{} break case StatusCodeServerErrorUnauthorized: appError = ServerErrorUnauthorized{} break case StatusCodeServerErrorThrottle: var suggestedRetryIn *time.Duration for _, kv := range s.Fields { if kv.Key == "suggestedRetryInMS" { if ms, err := strconv.Atoi(kv.Value); err != nil { d := time.Duration(ms) * time.Millisecond suggestedRetryIn = &d } break } } appError = ServerErrorThrottle{ Err: errors.New(s.Desc), SuggestedRetryIn: suggestedRetryIn, } break case StatusCodeServerErrorConditionFailed: shouldThrottle := false for _, kv := range s.Fields { if kv.Key == "ShouldThrottle" { shouldThrottle, _ = strconv.ParseBool(kv.Value) break } } appError = ServerErrorConditionFailed{ Err: errors.New(s.Desc), ShouldThrottle: shouldThrottle, } break case StatusCodeServerErrorWriteAccess: appError = ServerErrorWriteAccess{} break case StatusCodeServerErrorConflictFolderMapping: appError = ServerErrorConflictFolderMapping{Desc: s.Desc} break case StatusCodeServerErrorTooManyFoldersCreated: err := ServerErrorTooManyFoldersCreated{} for _, f := range s.Fields { switch f.Key { case "Limit": err.Limit, _ = strconv.ParseUint(f.Value, 10, 64) case "Created": 
err.Created, _ = strconv.ParseUint(f.Value, 10, 64) } } appError = err break case StatusCodeServerErrorCannotReadFinalizedTLF: appError = ServerErrorCannotReadFinalizedTLF{} break default: ase := libkb.AppStatusError{ Code: s.Code, Name: s.Name, Desc: s.Desc, Fields: make(map[string]string), } for _, f := range s.Fields { ase.Fields[f.Key] = f.Value } appError = ase } return appError, nil }
1
17,816
"contingent to" -> "contingent on"
keybase-kbfs
go
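The patch above only introduces the status-code constant. In this file, each status code is normally paired with an error type that implements `Error` and `ToStatus`, plus a branch in `ServerErrorUnwrapper.UnwrapError`. A hypothetical sketch following that existing pattern; the type name and wiring are illustrative, not taken from the actual change:

```go
// Hypothetical companion type for the new status code, modeled on the other
// error types in this file; the real change may look different.
type ServerErrorRequiredLockIsNotHeld struct{}

// Error implements the error interface for ServerErrorRequiredLockIsNotHeld.
func (e ServerErrorRequiredLockIsNotHeld) Error() string {
	return "ServerErrorRequiredLockIsNotHeld{}"
}

// ToStatus implements the ExportableError interface for
// ServerErrorRequiredLockIsNotHeld.
func (e ServerErrorRequiredLockIsNotHeld) ToStatus() (s keybase1.Status) {
	s.Code = StatusCodeServerErrorRequiredLockIsNotHeld
	s.Name = "REQUIRED_LOCK_IS_NOT_HELD"
	s.Desc = e.Error()
	return
}

// ServerErrorUnwrapper.UnwrapError would then get a matching branch:
//   case StatusCodeServerErrorRequiredLockIsNotHeld:
//       appError = ServerErrorRequiredLockIsNotHeld{}
```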
@@ -19,6 +19,14 @@ Exceptions/Errors used in Koalas. """ +class GroupByError(Exception): + pass + + +class DataError(GroupByError): + pass + + class SparkPandasIndexingError(Exception): pass
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Exceptions/Errors used in Koalas. """ class SparkPandasIndexingError(Exception): pass def code_change_hint(pandas_function, spark_target_function): if pandas_function is not None and spark_target_function is not None: return "You are trying to use pandas function {}, use spark function {}".format( pandas_function, spark_target_function ) elif pandas_function is not None and spark_target_function is None: return ( "You are trying to use pandas function {}, checkout the spark " "user guide to find a relevant function" ).format(pandas_function) elif pandas_function is None and spark_target_function is not None: return "Use spark function {}".format(spark_target_function) else: # both none return "Checkout the spark user guide to find a relevant function" class SparkPandasNotImplementedError(NotImplementedError): def __init__(self, pandas_function=None, spark_target_function=None, description=""): self.pandas_source = pandas_function self.spark_target = spark_target_function hint = code_change_hint(pandas_function, spark_target_function) if len(description) > 0: description += " " + hint else: description = hint super(SparkPandasNotImplementedError, self).__init__(description) class PandasNotImplementedError(NotImplementedError): def __init__( self, class_name, method_name=None, arg_name=None, property_name=None, deprecated=False, reason="", ): assert (method_name is None) != (property_name is None) self.class_name = class_name self.method_name = method_name self.arg_name = arg_name if method_name is not None: if arg_name is not None: msg = "The method `{0}.{1}()` does not support `{2}` parameter. {3}".format( class_name, method_name, arg_name, reason ) else: if deprecated: msg = ( "The method `{0}.{1}()` is deprecated in pandas and will therefore " + "not be supported in Koalas. {2}" ).format(class_name, method_name, reason) else: if reason == "": reason = " yet." else: reason = ". " + reason msg = "The method `{0}.{1}()` is not implemented{2}".format( class_name, method_name, reason ) else: if deprecated: msg = ( "The property `{0}.{1}()` is deprecated in pandas and will therefore " + "not be supported in Koalas. {2}" ).format(class_name, property_name, reason) else: if reason == "": reason = " yet." else: reason = ". " + reason msg = "The property `{0}.{1}()` is not implemented{2}".format( class_name, property_name, reason ) super(NotImplementedError, self).__init__(msg)
1
16,003
Does pandas throw an exception like this?
databricks-koalas
py
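Note on the databricks-koalas record above (the patch adds GroupByError and DataError; the reviewer asks whether pandas throws such an exception): pandas of that era did raise a similarly named DataError when, for example, a groupby aggregation found no numeric columns to aggregate, though the module it lives in has moved between pandas versions, so treat that parallel as approximate. Below is a minimal, self-contained sketch of how the hierarchy introduced by the patch could be raised and caught; the aggregation helper is purely illustrative and is not part of Koalas or pandas.

class GroupByError(Exception):
    pass


class DataError(GroupByError):
    pass


def mean_of_numeric_columns(rows):
    """Toy group aggregation: average every numeric column across a list of dicts."""
    numeric_keys = {
        key
        for row in rows
        for key, value in row.items()
        if isinstance(value, (int, float)) and not isinstance(value, bool)
    }
    if not numeric_keys:
        # The situation DataError is meant to signal: nothing numeric to aggregate.
        raise DataError("No numeric types to aggregate")
    return {key: sum(row.get(key, 0) for row in rows) / len(rows) for key in numeric_keys}


try:
    mean_of_numeric_columns([{"name": "a"}, {"name": "b"}])
except GroupByError as exc:  # DataError is also caught here via its base class
    print(type(exc).__name__, exc)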
@@ -538,14 +538,14 @@ cvdescriptorset::AllocateDescriptorSetsData::AllocateDescriptorSetsData(uint32_t
 
 cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const VkDescriptorPool pool,
                                               const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count,
-                                              CoreChecks *dev_data)
+                                              cvdescriptorset::DescriptorSet::StateTracker *state_data)
     : some_update_(false),
       set_(set),
       pool_state_(nullptr),
       p_layout_(layout),
-      device_data_(dev_data),
+      state_data_(state_data),
       variable_count_(variable_count) {
-    pool_state_ = dev_data->GetDescriptorPoolState(pool);
+    pool_state_ = state_data->GetDescriptorPoolState(pool);
     // Foreach binding, create default descriptors of given type
     descriptors_.reserve(p_layout_->GetTotalDescriptorCount());
     for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) {
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Tobin Ehlis <tobine@google.com> * John Zulauf <jzulauf@lunarg.com> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include "chassis.h" #include "core_validation_error_enums.h" #include "core_validation.h" #include "descriptor_sets.h" #include "hash_vk_types.h" #include "vk_enum_string_helper.h" #include "vk_safe_struct.h" #include "vk_typemap_helper.h" #include "buffer_validation.h" #include <sstream> #include <algorithm> #include <array> #include <memory> // ExtendedBinding collects a VkDescriptorSetLayoutBinding and any extended // state that comes from a different array/structure so they can stay together // while being sorted by binding number. struct ExtendedBinding { ExtendedBinding(const VkDescriptorSetLayoutBinding *l, VkDescriptorBindingFlagsEXT f) : layout_binding(l), binding_flags(f) {} const VkDescriptorSetLayoutBinding *layout_binding; VkDescriptorBindingFlagsEXT binding_flags; }; struct BindingNumCmp { bool operator()(const ExtendedBinding &a, const ExtendedBinding &b) const { return a.layout_binding->binding < b.layout_binding->binding; } }; using DescriptorSet = cvdescriptorset::DescriptorSet; using DescriptorSetLayout = cvdescriptorset::DescriptorSetLayout; using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef; using DescriptorSetLayoutId = cvdescriptorset::DescriptorSetLayoutId; // Canonical dictionary of DescriptorSetLayoutDef (without any handle/device specific information) cvdescriptorset::DescriptorSetLayoutDict descriptor_set_layout_dict; DescriptorSetLayoutId GetCanonicalId(const VkDescriptorSetLayoutCreateInfo *p_create_info) { return descriptor_set_layout_dict.look_up(DescriptorSetLayoutDef(p_create_info)); } // Construct DescriptorSetLayout instance from given create info // Proactively reserve and resize as possible, as the reallocation was visible in profiling cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescriptorSetLayoutCreateInfo *p_create_info) : flags_(p_create_info->flags), binding_count_(0), descriptor_count_(0), dynamic_descriptor_count_(0) { const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(p_create_info->pNext); binding_type_stats_ = {0, 0, 0}; std::set<ExtendedBinding, BindingNumCmp> sorted_bindings; const uint32_t input_bindings_count = p_create_info->bindingCount; // Sort the input bindings in binding number order, eliminating duplicates for (uint32_t i = 0; i < input_bindings_count; i++) { VkDescriptorBindingFlagsEXT flags = 0; if (flags_create_info && flags_create_info->bindingCount == p_create_info->bindingCount) { flags = flags_create_info->pBindingFlags[i]; } sorted_bindings.insert(ExtendedBinding(p_create_info->pBindings + i, flags)); } // Store the create info in the sorted order from above std::map<uint32_t, 
uint32_t> binding_to_dyn_count; uint32_t index = 0; binding_count_ = static_cast<uint32_t>(sorted_bindings.size()); bindings_.reserve(binding_count_); binding_flags_.reserve(binding_count_); binding_to_index_map_.reserve(binding_count_); for (auto input_binding : sorted_bindings) { // Add to binding and map, s.t. it is robust to invalid duplication of binding_num const auto binding_num = input_binding.layout_binding->binding; binding_to_index_map_[binding_num] = index++; bindings_.emplace_back(input_binding.layout_binding); auto &binding_info = bindings_.back(); binding_flags_.emplace_back(input_binding.binding_flags); descriptor_count_ += binding_info.descriptorCount; if (binding_info.descriptorCount > 0) { non_empty_bindings_.insert(binding_num); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { binding_to_dyn_count[binding_num] = binding_info.descriptorCount; dynamic_descriptor_count_ += binding_info.descriptorCount; binding_type_stats_.dynamic_buffer_count++; } else if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) { binding_type_stats_.non_dynamic_buffer_count++; } else { binding_type_stats_.image_sampler_count++; } } assert(bindings_.size() == binding_count_); assert(binding_flags_.size() == binding_count_); uint32_t global_index = 0; global_index_range_.reserve(binding_count_); // Vector order is finalized so build vectors of descriptors and dynamic offsets by binding index for (uint32_t i = 0; i < binding_count_; ++i) { auto final_index = global_index + bindings_[i].descriptorCount; global_index_range_.emplace_back(global_index, final_index); global_index = final_index; } // Now create dyn offset array mapping for any dynamic descriptors uint32_t dyn_array_idx = 0; binding_to_dynamic_array_idx_map_.reserve(binding_to_dyn_count.size()); for (const auto &bc_pair : binding_to_dyn_count) { binding_to_dynamic_array_idx_map_[bc_pair.first] = dyn_array_idx; dyn_array_idx += bc_pair.second; } } size_t cvdescriptorset::DescriptorSetLayoutDef::hash() const { hash_util::HashCombiner hc; hc << flags_; hc.Combine(bindings_); hc.Combine(binding_flags_); return hc.Value(); } // // Return valid index or "end" i.e. binding_count_; // The asserts in "Get" are reduced to the set where no valid answer(like null or 0) could be given // Common code for all binding lookups. 
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromBinding(uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.cend()) return bi_itr->second; return GetBindingCount(); } VkDescriptorSetLayoutBinding const *cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorSetLayoutBindingPtrFromIndex( const uint32_t index) const { if (index >= bindings_.size()) return nullptr; return bindings_[index].ptr(); } // Return descriptorCount for given index, 0 if index is unavailable uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorCountFromIndex(const uint32_t index) const { if (index >= bindings_.size()) return 0; return bindings_[index].descriptorCount; } // For the given index, return descriptorType VkDescriptorType cvdescriptorset::DescriptorSetLayoutDef::GetTypeFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].descriptorType; return VK_DESCRIPTOR_TYPE_MAX_ENUM; } // For the given index, return stageFlags VkShaderStageFlags cvdescriptorset::DescriptorSetLayoutDef::GetStageFlagsFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].stageFlags; return VkShaderStageFlags(0); } // Return binding flags for given index, 0 if index is unavailable VkDescriptorBindingFlagsEXT cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorBindingFlagsFromIndex( const uint32_t index) const { if (index >= binding_flags_.size()) return 0; return binding_flags_[index]; } const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromIndex(uint32_t index) const { const static IndexRange kInvalidRange = {0xFFFFFFFF, 0xFFFFFFFF}; if (index >= binding_flags_.size()) return kInvalidRange; return global_index_range_[index]; } // For the given binding, return the global index range (half open) // As start and end are often needed in pairs, get both with a single lookup. const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromBinding( const uint32_t binding) const { uint32_t index = GetIndexFromBinding(binding); return GetGlobalIndexRangeFromIndex(index); } // For given binding, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromBinding(const uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { return bindings_[bi_itr->second].pImmutableSamplers; } return nullptr; } // Move to next valid binding having a non-zero binding count uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetNextValidBinding(const uint32_t binding) const { auto it = non_empty_bindings_.upper_bound(binding); assert(it != non_empty_bindings_.cend()); if (it != non_empty_bindings_.cend()) return *it; return GetMaxBinding() + 1; } // For given index, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromIndex(const uint32_t index) const { if (index < bindings_.size()) { return bindings_[index].pImmutableSamplers; } return nullptr; } // If our layout is compatible with rh_ds_layout, return true. 
bool cvdescriptorset::DescriptorSetLayout::IsCompatible(DescriptorSetLayout const *rh_ds_layout) const { bool compatible = (this == rh_ds_layout) || (GetLayoutDef() == rh_ds_layout->GetLayoutDef()); return compatible; } // If our layout is compatible with rh_ds_layout, return true, // else return false and fill in error_msg will description of what causes incompatibility bool cvdescriptorset::VerifySetLayoutCompatibility(DescriptorSetLayout const *lh_ds_layout, DescriptorSetLayout const *rh_ds_layout, std::string *error_msg) { // Short circuit the detailed check. if (lh_ds_layout->IsCompatible(rh_ds_layout)) return true; // Do a detailed compatibility check of this lhs def (referenced by lh_ds_layout), vs. the rhs (layout and def) // Should only be run if trivial accept has failed, and in that context should return false. VkDescriptorSetLayout lh_dsl_handle = lh_ds_layout->GetDescriptorSetLayout(); VkDescriptorSetLayout rh_dsl_handle = rh_ds_layout->GetDescriptorSetLayout(); DescriptorSetLayoutDef const *lh_ds_layout_def = lh_ds_layout->GetLayoutDef(); DescriptorSetLayoutDef const *rh_ds_layout_def = rh_ds_layout->GetLayoutDef(); // Check descriptor counts if (lh_ds_layout_def->GetTotalDescriptorCount() != rh_ds_layout_def->GetTotalDescriptorCount()) { std::stringstream error_str; error_str << "DescriptorSetLayout " << lh_dsl_handle << " has " << lh_ds_layout_def->GetTotalDescriptorCount() << " descriptors, but DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has " << rh_ds_layout_def->GetTotalDescriptorCount() << " descriptors."; *error_msg = error_str.str(); return false; // trivial fail case } // Descriptor counts match so need to go through bindings one-by-one // and verify that type and stageFlags match for (const auto &binding : lh_ds_layout_def->GetBindings()) { // TODO : Do we also need to check immutable samplers? 
// VkDescriptorSetLayoutBinding *rh_binding; if (binding.descriptorCount != rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has a descriptorCount of " << binding.descriptorCount << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has a descriptorCount of " << rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding); *error_msg = error_str.str(); return false; } else if (binding.descriptorType != rh_ds_layout_def->GetTypeFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " is type '" << string_VkDescriptorType(binding.descriptorType) << "' but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, is type '" << string_VkDescriptorType(rh_ds_layout_def->GetTypeFromBinding(binding.binding)) << "'"; *error_msg = error_str.str(); return false; } else if (binding.stageFlags != rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has stageFlags " << binding.stageFlags << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has stageFlags " << rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding); *error_msg = error_str.str(); return false; } } // No detailed check should succeed if the trivial check failed -- or the dictionary has failed somehow. bool compatible = true; assert(!compatible); return compatible; } bool cvdescriptorset::DescriptorSetLayoutDef::IsNextBindingConsistent(const uint32_t binding) const { if (!binding_to_index_map_.count(binding + 1)) return false; auto const &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { const auto &next_bi_itr = binding_to_index_map_.find(binding + 1); if (next_bi_itr != binding_to_index_map_.end()) { auto type = bindings_[bi_itr->second].descriptorType; auto stage_flags = bindings_[bi_itr->second].stageFlags; auto immut_samp = bindings_[bi_itr->second].pImmutableSamplers ? true : false; auto flags = binding_flags_[bi_itr->second]; if ((type != bindings_[next_bi_itr->second].descriptorType) || (stage_flags != bindings_[next_bi_itr->second].stageFlags) || (immut_samp != (bindings_[next_bi_itr->second].pImmutableSamplers ? 
true : false)) || (flags != binding_flags_[next_bi_itr->second])) { return false; } return true; } } return false; } // The DescriptorSetLayout stores the per handle data for a descriptor set layout, and references the common defintion for the // handle invariant portion cvdescriptorset::DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *p_create_info, const VkDescriptorSetLayout layout) : layout_(layout), layout_destroyed_(false), layout_id_(GetCanonicalId(p_create_info)) {} // Validate descriptor set layout create info bool cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( const debug_report_data *report_data, const VkDescriptorSetLayoutCreateInfo *create_info, const bool push_descriptor_ext, const uint32_t max_push_descriptors, const bool descriptor_indexing_ext, const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *descriptor_indexing_features, const VkPhysicalDeviceInlineUniformBlockFeaturesEXT *inline_uniform_block_features, const VkPhysicalDeviceInlineUniformBlockPropertiesEXT *inline_uniform_block_props) { bool skip = false; std::unordered_set<uint32_t> bindings; uint64_t total_descriptors = 0; const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(create_info->pNext); const bool push_descriptor_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); if (push_descriptor_set && !push_descriptor_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attempted to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR", "VkDescriptorSetLayoutCreateInfo::flags", VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } const bool update_after_bind_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT); if (update_after_bind_set && !descriptor_indexing_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attemped to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT", "VkDescriptorSetLayoutCreateInfo::flags", VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } auto valid_type = [push_descriptor_set](const VkDescriptorType type) { return !push_descriptor_set || ((type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)); }; uint32_t max_binding = 0; for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; max_binding = std::max(max_binding, binding_info.binding); if (!bindings.insert(binding_info.binding).second) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279", "duplicated binding number in VkDescriptorSetLayoutBinding."); } if (!valid_type(binding_info.descriptorType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) ? 
"VUID-VkDescriptorSetLayoutCreateInfo-flags-02208" : "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280", "invalid type %s ,for push descriptors in VkDescriptorSetLayoutBinding entry %" PRIu32 ".", string_VkDescriptorType(binding_info.descriptorType), i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((binding_info.descriptorCount % 4) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209", "descriptorCount =(%" PRIu32 ") must be a multiple of 4", binding_info.descriptorCount); } if (binding_info.descriptorCount > inline_uniform_block_props->maxInlineUniformBlockSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210", "descriptorCount =(%" PRIu32 ") must be less than or equal to maxInlineUniformBlockSize", binding_info.descriptorCount); } } total_descriptors += binding_info.descriptorCount; } if (flags_create_info) { if (flags_create_info->bindingCount != 0 && flags_create_info->bindingCount != create_info->bindingCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002", "VkDescriptorSetLayoutCreateInfo::bindingCount (%d) != " "VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount (%d)", create_info->bindingCount, flags_create_info->bindingCount); } if (flags_create_info->bindingCount == create_info->bindingCount) { for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) { if (!update_after_bind_set) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER && !descriptor_indexing_features->descriptorBindingUniformBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformBufferUpdateAfterBind-03005", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) && !descriptor_indexing_features->descriptorBindingSampledImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingSampledImageUpdateAfterBind-03006", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !descriptor_indexing_features->descriptorBindingStorageImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageImageUpdateAfterBind-03007", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if 
(binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER && !descriptor_indexing_features->descriptorBindingStorageBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageBufferUpdateAfterBind-03008", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingUniformTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformTexelBufferUpdateAfterBind-03009", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingStorageTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageTexelBufferUpdateAfterBind-03010", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-None-03011", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && !inline_uniform_block_features->descriptorBindingInlineUniformBlockUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingInlineUniformBlockUpdateAfterBind-02211", "Invalid flags (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) for " "VkDescriptorSetLayoutBinding entry %" PRIu32 " with descriptorBindingInlineUniformBlockUpdateAfterBind not enabled", i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingUpdateUnusedWhilePending) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUpdateUnusedWhilePending-03012", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingPartiallyBound) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingPartiallyBound-03013", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT) { if (binding_info.binding != max_binding) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03004", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (!descriptor_indexing_features->descriptorBindingVariableDescriptorCount) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingVariableDescriptorCount-03014", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03015", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (push_descriptor_set && (flags_create_info->pBindingFlags[i] & (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-flags-03003", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } } } if ((push_descriptor_set) && (total_descriptors > max_push_descriptors)) { const char *undefined = push_descriptor_ext ? "" : " -- undefined"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281", "for push descriptor, total descriptor count in layout (%" PRIu64 ") must not be greater than VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors (%" PRIu32 "%s).", total_descriptors, max_push_descriptors, undefined); } return skip; } cvdescriptorset::AllocateDescriptorSetsData::AllocateDescriptorSetsData(uint32_t count) : required_descriptors_by_type{}, layout_nodes(count, nullptr) {} cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const VkDescriptorPool pool, const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count, CoreChecks *dev_data) : some_update_(false), set_(set), pool_state_(nullptr), p_layout_(layout), device_data_(dev_data), variable_count_(variable_count) { pool_state_ = dev_data->GetDescriptorPoolState(pool); // Foreach binding, create default descriptors of given type descriptors_.reserve(p_layout_->GetTotalDescriptorCount()); for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) { auto type = p_layout_->GetTypeFromIndex(i); switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLER: { auto immut_sampler = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut_sampler) { descriptors_.emplace_back(new SamplerDescriptor(immut_sampler + di)); some_update_ = true; // Immutable samplers are updated at creation } else descriptors_.emplace_back(new SamplerDescriptor(nullptr)); } break; } case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { auto immut = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut) { descriptors_.emplace_back(new ImageSamplerDescriptor(immut + di)); some_update_ = true; // Immutable samplers are updated at creation } else descriptors_.emplace_back(new 
ImageSamplerDescriptor(nullptr)); } break; } // ImageDescriptors case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new ImageDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new TexelDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new BufferDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new InlineUniformDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new AccelerationStructureDescriptor(type)); break; default: assert(0); // Bad descriptor type specified break; } } } cvdescriptorset::DescriptorSet::~DescriptorSet() { InvalidateBoundCmdBuffers(); } static std::string StringDescriptorReqViewType(descriptor_req req) { std::string result(""); for (unsigned i = 0; i <= VK_IMAGE_VIEW_TYPE_END_RANGE; i++) { if (req & (1 << i)) { if (result.size()) result += ", "; result += string_VkImageViewType(VkImageViewType(i)); } } if (!result.size()) result = "(none)"; return result; } static char const *StringDescriptorReqComponentType(descriptor_req req) { if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_SINT) return "SINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_UINT) return "UINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT) return "FLOAT"; return "(none)"; } static unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt) { if (FormatIsSInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_SINT; if (FormatIsUInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (FormatIsDepthAndStencil(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT | DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (fmt == VK_FORMAT_UNDEFINED) return 0; // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader. return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT; } // Validate that the state of this set is appropriate for the given bindings and dynamic_offsets at Draw time // This includes validating that all descriptors in the given bindings are updated, // that any update buffers are valid, and that any dynamic offsets are within the bounds of their buffers. // Return true if state is acceptable, or false and write an error message into error string bool cvdescriptorset::ValidateDrawState(const DescriptorSet *descriptor_set, const std::map<uint32_t, descriptor_req> &bindings, const std::vector<uint32_t> &dynamic_offsets, CMD_BUFFER_STATE *cb_node, const char *caller, std::string *error) { auto device_data = descriptor_set->GetDeviceData(); for (auto binding_pair : bindings) { auto binding = binding_pair.first; DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(), binding); if (binding_it.AtEnd()) { // End at construction is the condition for an invalid binding. 
std::stringstream error_str; error_str << "Attempting to validate DrawState for binding #" << binding << " which is an invalid binding for this descriptor set."; *error = error_str.str(); return false; } // Copy the range, the end range is subject to update based on variable length descriptor arrays. cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange(); auto array_idx = 0; // Track array idx if we're dealing with array descriptors if (binding_it.IsVariableDescriptorCount()) { // Only validate the first N descriptors if it uses variable_count index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount(); } for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) { uint32_t index = i - index_range.start; const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i); if ((binding_it.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) || descriptor->GetClass() == InlineUniform) { // Can't validate the descriptor because it may not have been updated, // or the view could have been destroyed continue; } else if (!descriptor->updated) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is being used in draw but has never been updated via vkUpdateDescriptorSets() or a similar call."; *error = error_str.str(); return false; } else { auto descriptor_class = descriptor->GetClass(); if (descriptor_class == GeneralBuffer) { // Verify that buffers are valid auto buffer = static_cast<const BufferDescriptor *>(descriptor)->GetBuffer(); auto buffer_node = device_data->GetBufferState(buffer); if (!buffer_node) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " references invalid buffer " << buffer << "."; *error = error_str.str(); return false; } else if (!buffer_node->sparse) { for (auto mem_binding : buffer_node->GetBoundMemory()) { if (!device_data->GetDevMemState(mem_binding)) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " that references invalid memory " << mem_binding << "."; *error = error_str.str(); return false; } } } if (descriptor->IsDynamic()) { // Validate that dynamic offsets are within the buffer auto buffer_size = buffer_node->createInfo.size; auto range = static_cast<const BufferDescriptor *>(descriptor)->GetRange(); auto desc_offset = static_cast<const BufferDescriptor *>(descriptor)->GetOffset(); auto dyn_offset = dynamic_offsets[binding_it.GetDynamicOffsetIndex() + array_idx]; if (VK_WHOLE_SIZE == range) { if ((dyn_offset + desc_offset) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " with update range of VK_WHOLE_SIZE has dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } else { if ((dyn_offset + desc_offset + range) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " with dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " and range " << range << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } } } else if 
(descriptor_class == ImageSampler || descriptor_class == Image) { VkImageView image_view; VkImageLayout image_layout; if (descriptor_class == ImageSampler) { image_view = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageView(); image_layout = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageLayout(); } else { image_view = static_cast<const ImageDescriptor *>(descriptor)->GetImageView(); image_layout = static_cast<const ImageDescriptor *>(descriptor)->GetImageLayout(); } auto reqs = binding_pair.second; auto image_view_state = device_data->GetImageViewState(image_view); if (nullptr == image_view_state) { // Image view must have been destroyed since initial update. Could potentially flag the descriptor // as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using imageView " << device_data->report_data->FormatHandle(image_view).c_str() << " that has been destroyed."; *error = error_str.str(); return false; } auto image_view_ci = image_view_state->create_info; if ((reqs & DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS) && (~reqs & (1 << image_view_ci.viewType))) { // bad view type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires an image view of type " << StringDescriptorReqViewType(reqs) << " but got " << string_VkImageViewType(image_view_ci.viewType) << "."; *error = error_str.str(); return false; } auto format_bits = DescriptorRequirementsBitsFromFormat(image_view_ci.format); if (!(reqs & format_bits)) { // bad component type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires " << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is " << string_VkFormat(image_view_ci.format) << "."; *error = error_str.str(); return false; } auto image_node = device_data->GetImageState(image_view_ci.image); assert(image_node); // Verify Image Layout // No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED. bool hit_error = false; device_data->VerifyImageLayout(cb_node, image_node, image_view_state->normalized_subresource_range, image_view_ci.subresourceRange.aspectMask, image_layout, VK_IMAGE_LAYOUT_UNDEFINED, caller, kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error); if (hit_error) { *error = "Image layout specified at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time " "doesn't match actual image layout at time descriptor is used. 
See previous error callback for " "specific details."; return false; } // Verify Sample counts if ((reqs & DESCRIPTOR_REQ_SINGLE_SAMPLE) && image_node->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires bound image to have VK_SAMPLE_COUNT_1_BIT but got " << string_VkSampleCountFlagBits(image_node->createInfo.samples) << "."; *error = error_str.str(); return false; } if ((reqs & DESCRIPTOR_REQ_MULTI_SAMPLE) && image_node->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires bound image to have multiple samples, but got VK_SAMPLE_COUNT_1_BIT."; *error = error_str.str(); return false; } } else if (descriptor_class == TexelBuffer) { auto texel_buffer = static_cast<const TexelDescriptor *>(descriptor); auto buffer_view = device_data->GetBufferViewState(texel_buffer->GetBufferView()); if (nullptr == buffer_view) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using bufferView " << buffer_view << " that has been destroyed."; *error = error_str.str(); return false; } auto buffer = buffer_view->create_info.buffer; auto buffer_state = device_data->GetBufferState(buffer); if (!buffer_state) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using buffer " << buffer_state << " that has been destroyed."; *error = error_str.str(); return false; } auto reqs = binding_pair.second; auto format_bits = DescriptorRequirementsBitsFromFormat(buffer_view->create_info.format); if (!(reqs & format_bits)) { // bad component type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires " << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is " << string_VkFormat(buffer_view->create_info.format) << "."; *error = error_str.str(); return false; } } if (descriptor_class == ImageSampler || descriptor_class == PlainSampler) { // Verify Sampler still valid VkSampler sampler; if (descriptor_class == ImageSampler) { sampler = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetSampler(); } else { sampler = static_cast<const SamplerDescriptor *>(descriptor)->GetSampler(); } if (!ValidateSampler(sampler, device_data)) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using sampler " << sampler << " that has been destroyed."; *error = error_str.str(); return false; } else { SAMPLER_STATE *sampler_state = device_data->GetSamplerState(sampler); if (sampler_state->samplerConversion && !descriptor->IsImmutableSampler()) { std::stringstream error_str; error_str << "sampler (" << sampler << ") in the descriptor set (" << descriptor_set->GetSet() << ") contains a YCBCR conversion (" << sampler_state->samplerConversion << ") , then the sampler MUST also exists as an immutable sampler."; *error = error_str.str(); } } } } } } return true; } // For given bindings, place any update buffers or images into the passed-in unordered_sets uint32_t cvdescriptorset::DescriptorSet::GetStorageUpdates(const std::map<uint32_t, descriptor_req> &bindings, std::unordered_set<VkBuffer> *buffer_set, std::unordered_set<VkImageView> *image_set) const { auto num_updates = 0; for (auto binding_pair : bindings) { auto binding = binding_pair.first; // If a 
binding doesn't exist, skip it if (!p_layout_->HasBinding(binding)) { continue; } uint32_t start_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding).start; if (descriptors_[start_idx]->IsStorage()) { if (Image == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { image_set->insert(static_cast<ImageDescriptor *>(descriptors_[start_idx + i].get())->GetImageView()); num_updates++; } } } else if (TexelBuffer == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { auto bufferview = static_cast<TexelDescriptor *>(descriptors_[start_idx + i].get())->GetBufferView(); auto bv_state = device_data_->GetBufferViewState(bufferview); if (bv_state) { buffer_set->insert(bv_state->create_info.buffer); num_updates++; } } } } else if (GeneralBuffer == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { buffer_set->insert(static_cast<BufferDescriptor *>(descriptors_[start_idx + i].get())->GetBuffer()); num_updates++; } } } } } return num_updates; } // Set is being deleted or updates so invalidate all bound cmd buffers void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers() { device_data_->InvalidateCommandBuffers(cb_bindings, VulkanTypedHandle(set_, kVulkanObjectTypeDescriptorSet)); } // Loop through the write updates to do for a push descriptor set, ignoring dstSet void cvdescriptorset::DescriptorSet::PerformPushDescriptorsUpdate(uint32_t write_count, const VkWriteDescriptorSet *p_wds) { assert(IsPushDescriptor()); for (uint32_t i = 0; i < write_count; i++) { PerformWriteUpdate(&p_wds[i]); } } // Perform write update in given update struct void cvdescriptorset::DescriptorSet::PerformWriteUpdate(const VkWriteDescriptorSet *update) { // Perform update on a per-binding basis as consecutive updates roll over to next binding auto descriptors_remaining = update->descriptorCount; auto binding_being_updated = update->dstBinding; auto offset = update->dstArrayElement; uint32_t update_index = 0; while (descriptors_remaining) { uint32_t update_count = std::min(descriptors_remaining, GetDescriptorCountFromBinding(binding_being_updated)); auto global_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding_being_updated).start + offset; // Loop over the updates for a single binding at a time for (uint32_t di = 0; di < update_count; ++di, ++update_index) { descriptors_[global_idx + di]->WriteUpdate(update, update_index); } // Roll over to next binding in case of consecutive update descriptors_remaining -= update_count; offset = 0; binding_being_updated++; } if (update->descriptorCount) some_update_ = true; if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { InvalidateBoundCmdBuffers(); } } // Validate Copy update bool cvdescriptorset::ValidateCopyUpdate(const debug_report_data *report_data, const VkCopyDescriptorSet *update, const DescriptorSet *dst_set, const DescriptorSet *src_set, const char *func_name, std::string *error_code, std::string *error_msg) { auto dst_layout = dst_set->GetLayout(); auto src_layout = src_set->GetLayout(); // Verify dst layout still valid if (dst_layout->IsDestroyed()) { *error_code = 
"VUID-VkCopyDescriptorSet-dstSet-parameter"; string_sprintf(error_msg, "Cannot call %s to perform copy update on dstSet %s" " created with destroyed %s.", func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(), report_data->FormatHandle(dst_layout->GetDescriptorSetLayout()).c_str()); return false; } // Verify src layout still valid if (src_layout->IsDestroyed()) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-parameter"; string_sprintf(error_msg, "Cannot call %s to perform copy update of dstSet %s" " from srcSet %s" " created with destroyed %s.", func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(), report_data->FormatHandle(src_set->GetSet()).c_str(), report_data->FormatHandle(src_layout->GetDescriptorSetLayout()).c_str()); return false; } if (!dst_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-00347"; std::stringstream error_str; error_str << "DescriptorSet " << dst_set->GetSet() << " does not have copy update dest binding of " << update->dstBinding; *error_msg = error_str.str(); return false; } if (!src_set->HasBinding(update->srcBinding)) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-00345"; std::stringstream error_str; error_str << "DescriptorSet " << dst_set->GetSet() << " does not have copy update src binding of " << update->srcBinding; *error_msg = error_str.str(); return false; } // Verify idle ds if (dst_set->in_use.load() && !(dst_layout->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { // TODO : Re-using Free Idle error code, need copy update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform copy update on descriptor set " << dst_set->GetSet() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // src & dst set bindings are valid // Check bounds of src & dst auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; if ((src_start_idx + update->descriptorCount) > src_set->GetTotalDescriptorCount()) { // SRC update out of bounds *error_code = "VUID-VkCopyDescriptorSet-srcArrayElement-00346"; std::stringstream error_str; error_str << "Attempting copy update from descriptorSet " << update->srcSet << " binding#" << update->srcBinding << " with offset index of " << src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start << " plus update array offset of " << update->srcArrayElement << " and update of " << update->descriptorCount << " descriptors oversteps total number of descriptors in set: " << src_set->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } auto dst_start_idx = dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; if ((dst_start_idx + update->descriptorCount) > dst_layout->GetTotalDescriptorCount()) { // DST update out of bounds *error_code = "VUID-VkCopyDescriptorSet-dstArrayElement-00348"; std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding#" << update->dstBinding << " with offset index of " << dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start << " plus update array offset of " << update->dstArrayElement << " and update of " << update->descriptorCount << " descriptors oversteps total number of descriptors in set: 
" << dst_layout->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } // Check that types match // TODO : Base default error case going from here is "VUID-VkAcquireNextImageInfoKHR-semaphore-parameter"2ba which covers all // consistency issues, need more fine-grained error codes *error_code = "VUID-VkCopyDescriptorSet-srcSet-00349"; auto src_type = src_set->GetTypeFromBinding(update->srcBinding); auto dst_type = dst_layout->GetTypeFromBinding(update->dstBinding); if (src_type != dst_type) { std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(dst_type) << " from descriptorSet " << src_set->GetSet() << " binding #" << update->srcBinding << " with type " << string_VkDescriptorType(src_type) << ". Types do not match"; *error_msg = error_str.str(); return false; } // Verify consistency of src & dst bindings if update crosses binding boundaries if ((!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(src_layout.get(), update->srcBinding), update->srcArrayElement, update->descriptorCount, "copy update from", src_set->GetSet(), error_msg)) || (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dst_layout.get(), update->dstBinding), update->dstArrayElement, update->descriptorCount, "copy update to", dst_set->GetSet(), error_msg))) { return false; } if ((src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) && !(dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01918"; std::stringstream error_str; error_str << "If pname:srcSet's (" << update->srcSet << ") layout was created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag " "set, then pname:dstSet's (" << update->dstSet << ") layout must: also have been created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (!(src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) && (dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01919"; std::stringstream error_str; error_str << "If pname:srcSet's (" << update->srcSet << ") layout was created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag " "set, then pname:dstSet's (" << update->dstSet << ") layout must: also have been created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if ((src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) && !(dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01920"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if 
(!(src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) && (dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01921"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (src_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->srcArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02223"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "srcArrayElement " << update->srcArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02224"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02225"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Update parameters all look good and descriptor updated so verify update contents if (!VerifyCopyUpdateContents(dst_set->GetDeviceData(), update, src_set, src_type, src_start_idx, func_name, error_code, error_msg)) return false; // All checks passed so update is good return true; } // Perform Copy update void cvdescriptorset::DescriptorSet::PerformCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *src_set) { auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; auto dst_start_idx = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; // Update parameters all look good so perform update for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto src = src_set->descriptors_[src_start_idx + di].get(); auto dst = descriptors_[dst_start_idx + di].get(); if (src->updated) { dst->CopyUpdate(src); some_update_ = true; } else { dst->updated = false; } } if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { InvalidateBoundCmdBuffers(); } } // Update the drawing state for the affected descriptors. // Set cb_node to this set and this set to cb_node. 
// Add the bindings of the descriptor // Set the layout based on the current descriptor layout (will mask subsequent layer mismatch errors) // TODO: Modify the UpdateDrawState virtural functions to *only* set initial layout and not change layouts // Prereq: This should be called for a set that has been confirmed to be active for the given cb_node, meaning it's going // to be used in a draw by the given cb_node void cvdescriptorset::DescriptorSet::UpdateDrawState(CoreChecks *device_data, CMD_BUFFER_STATE *cb_node, const std::map<uint32_t, descriptor_req> &binding_req_map) { // bind cb to this descriptor set cb_bindings.insert(cb_node); // Add bindings for descriptor set, the set's pool, and individual objects in the set cb_node->object_bindings.emplace(set_, kVulkanObjectTypeDescriptorSet); pool_state_->cb_bindings.insert(cb_node); cb_node->object_bindings.emplace(pool_state_->pool, kVulkanObjectTypeDescriptorPool); // For the active slots, use set# to look up descriptorSet from boundDescriptorSets, and bind all of that descriptor set's // resources for (auto binding_req_pair : binding_req_map) { auto binding = binding_req_pair.first; // We aren't validating descriptors created with PARTIALLY_BOUND or UPDATE_AFTER_BIND, so don't record state if (p_layout_->GetDescriptorBindingFlagsFromBinding(binding) & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) { continue; } auto range = p_layout_->GetGlobalIndexRangeFromBinding(binding); for (uint32_t i = range.start; i < range.end; ++i) { descriptors_[i]->UpdateDrawState(device_data, cb_node); } } } void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair, const BindingReqMap &in_req, BindingReqMap *out_req, TrackedBindings *bindings) { assert(out_req); assert(bindings); const auto binding = binding_req_pair.first; // Use insert and look at the boolean ("was inserted") in the returned pair to see if this is a new set member. // Saves one hash lookup vs. find ... compare w/ end ... insert. 
const auto it_bool_pair = bindings->insert(binding); if (it_bool_pair.second) { out_req->emplace(binding_req_pair); } } void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair, const BindingReqMap &in_req, BindingReqMap *out_req, TrackedBindings *bindings, uint32_t limit) { if (bindings->size() < limit) FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, bindings); } void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, const BindingReqMap &in_req, BindingReqMap *out_req) { TrackedBindings &bound = cached_validation_[cb_state].command_binding_and_usage; if (bound.size() == GetBindingCount()) { return; // All bindings are bound, out req is empty } for (const auto &binding_req_pair : in_req) { const auto binding = binding_req_pair.first; // If a binding doesn't exist, or has already been bound, skip it if (p_layout_->HasBinding(binding)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, &bound); } } } void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline, const BindingReqMap &in_req, BindingReqMap *out_req) { auto &validated = cached_validation_[cb_state]; auto &image_sample_val = validated.image_samplers[pipeline]; auto *const dynamic_buffers = &validated.dynamic_buffers; auto *const non_dynamic_buffers = &validated.non_dynamic_buffers; const auto &stats = p_layout_->GetBindingTypeStats(); for (const auto &binding_req_pair : in_req) { auto binding = binding_req_pair.first; VkDescriptorSetLayoutBinding const *layout_binding = p_layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding); if (!layout_binding) { continue; } // Caching criteria differs per type. // If image_layout have changed , the image descriptors need to be validated against them. if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, dynamic_buffers, stats.dynamic_buffer_count); } else if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, non_dynamic_buffers, stats.non_dynamic_buffer_count); } else { // This is rather crude, as the changed layouts may not impact the bound descriptors, // but the simple "versioning" is a simple "dirt" test. auto &version = image_sample_val[binding]; // Take advantage of default construtor zero initialzing new entries if (version != cb_state->image_layout_change_count) { version = cb_state->image_layout_change_count; out_req->emplace(binding_req_pair); } } } } cvdescriptorset::SamplerDescriptor::SamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false) { updated = false; descriptor_class = PlainSampler; if (immut) { sampler_ = *immut; immutable_ = true; updated = true; } } // Validate given sampler. 
Currently this only checks to make sure it exists in the samplerMap bool cvdescriptorset::ValidateSampler(const VkSampler sampler, CoreChecks *dev_data) { return (dev_data->GetSamplerState(sampler) != nullptr); } bool cvdescriptorset::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type, CoreChecks *dev_data, const char *func_name, std::string *error_code, std::string *error_msg) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326"; auto iv_state = dev_data->GetImageViewState(image_view); if (!iv_state) { std::stringstream error_str; error_str << "Invalid VkImageView: " << dev_data->report_data->FormatHandle(image_view).c_str(); *error_msg = error_str.str(); return false; } // Note that when an imageview is created, we validated that memory is bound so no need to re-check here // Validate that imageLayout is compatible with aspect_mask and image format // and validate that image usage bits are correct for given usage VkImageAspectFlags aspect_mask = iv_state->create_info.subresourceRange.aspectMask; VkImage image = iv_state->create_info.image; VkFormat format = VK_FORMAT_MAX_ENUM; VkImageUsageFlags usage = 0; auto image_node = dev_data->GetImageState(image); if (image_node) { format = image_node->createInfo.format; usage = image_node->createInfo.usage; // Validate that memory is bound to image // TODO: This should have its own valid usage id apart from 2524 which is from CreateImageView case. The only // the error here occurs is if memory bound to a created imageView has been freed. if (dev_data->ValidateMemoryIsBoundToImage(image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) { *error_code = "VUID-VkImageViewCreateInfo-image-01020"; *error_msg = "No memory bound to image."; return false; } // KHR_maintenance1 allows rendering into 2D or 2DArray views which slice a 3D image, // but not binding them to descriptor sets. if (image_node->createInfo.imageType == VK_IMAGE_TYPE_3D && (iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D || iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { *error_code = "VUID-VkDescriptorImageInfo-imageView-00343"; *error_msg = "ImageView must not be a 2D or 2DArray view of a 3D image"; return false; } } // First validate that format and layout are compatible if (format == VK_FORMAT_MAX_ENUM) { std::stringstream error_str; error_str << "Invalid image (" << dev_data->report_data->FormatHandle(image).c_str() << ") in imageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ")."; *error_msg = error_str.str(); return false; } // TODO : The various image aspect and format checks here are based on general spec language in 11.5 Image Views section under // vkCreateImageView(). What's the best way to create unique id for these cases? 
bool ds = FormatIsDepthOrStencil(format); switch (image_layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: // Only Color bit must be set if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but does not have VK_IMAGE_ASPECT_COLOR_BIT set."; *error_msg = error_str.str(); return false; } // format must NOT be DS if (ds) { std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the image format is " << string_VkFormat(format) << " which is not a color format."; *error_msg = error_str.str(); return false; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: // Depth or stencil bit must be set, but both must NOT be set if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") has both STENCIL and DEPTH aspects set"; *error_msg = error_str.str(); return false; } } else if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) { // Neither were set std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " but does not have STENCIL or DEPTH aspects set"; *error_msg = error_str.str(); return false; } // format must be DS if (!ds) { std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " but the image format is " << string_VkFormat(format) << " which is not a depth/stencil format."; *error_msg = error_str.str(); return false; } break; default: // For other layouts if the source is depth/stencil image, both aspect bits must not be set if (ds) { if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") has layout " << string_VkImageLayout(image_layout) << " and is using depth/stencil image of format " << string_VkFormat(format) << " but it has both STENCIL and DEPTH aspects set, which is illegal. 
When using a depth/stencil " "image in a descriptor set, please only set either VK_IMAGE_ASPECT_DEPTH_BIT or " "VK_IMAGE_ASPECT_STENCIL_BIT depending on whether it will be used for depth reads or stencil " "reads respectively."; *error_msg = error_str.str(); return false; } } } break; } // Now validate that usage flags are correctly set for given type of update // As we're switching per-type, if any type has specific layout requirements, check those here as well // TODO : The various image usage bit requirements are in general spec language for VkImageUsageFlags bit block in 11.3 Images // under vkCreateImage() // TODO : Need to also validate case "VUID-VkWriteDescriptorSet-descriptorType-00336" where STORAGE_IMAGE & INPUT_ATTACH types // must have been created with identify swizzle const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { if (!(usage & VK_IMAGE_USAGE_SAMPLED_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_SAMPLED_BIT"; } break; } case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { if (!(usage & VK_IMAGE_USAGE_STORAGE_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_STORAGE_BIT"; } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) { std::stringstream error_str; // TODO : Need to create custom enum error codes for these cases if (image_node->shared_presentable) { if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != image_layout) { error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type with a front-buffered image is being updated with " "layout " << string_VkImageLayout(image_layout) << " but according to spec section 13.1 Descriptor Types, 'Front-buffered images that report " "support for VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT must be in the " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR layout.'"; *error_msg = error_str.str(); return false; } } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) { error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout " << string_VkImageLayout(image_layout) << " but according to spec section 13.1 Descriptor Types, 'Load and store operations on storage " "images can only be done on images in VK_IMAGE_LAYOUT_GENERAL layout.'"; *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { if (!(usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT"; } break; } default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "ImageView (" << dev_data->report_data->FormatHandle(image_view).c_str() << ") with usage mask 0x" << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } if ((type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) { // Test that the layout is compatible with the descriptorType for the two sampled image types const static std::array<VkImageLayout, 3> valid_layouts = { {VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}}; struct ExtensionLayout { VkImageLayout layout; bool DeviceExtensions::*extension; }; const static std::array<ExtensionLayout, 3> extended_layouts{ {// Note double brace req'd for aggregate initialization {VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, 
&DeviceExtensions::vk_khr_shared_presentable_image}, {VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}, {VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}}}; auto is_layout = [image_layout, dev_data](const ExtensionLayout &ext_layout) { return dev_data->device_extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout); }; bool valid_layout = (std::find(valid_layouts.cbegin(), valid_layouts.cend(), image_layout) != valid_layouts.cend()) || std::any_of(extended_layouts.cbegin(), extended_layouts.cend(), is_layout); if (!valid_layout) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01403"; std::stringstream error_str; error_str << "Descriptor update with descriptorType " << string_VkDescriptorType(type) << " is being updated with invalid imageLayout " << string_VkImageLayout(image_layout) << " for image " << dev_data->report_data->FormatHandle(image).c_str() << " in imageView " << dev_data->report_data->FormatHandle(image_view).c_str() << ". Allowed layouts are: VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " << "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL"; for (auto &ext_layout : extended_layouts) { if (dev_data->device_extensions.*(ext_layout.extension)) { error_str << ", " << string_VkImageLayout(ext_layout.layout); } } *error_msg = error_str.str(); return false; } } return true; } void cvdescriptorset::SamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { if (!immutable_) { sampler_ = update->pImageInfo[index].sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const SamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } } cvdescriptorset::ImageSamplerDescriptor::ImageSamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = ImageSampler; if (immut) { sampler_ = *immut; immutable_ = true; } } void cvdescriptorset::ImageSamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; if (!immutable_) { sampler_ = image_info.sampler; } image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageSamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const ImageSamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } auto image_view = static_cast<const ImageSamplerDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageSamplerDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // First add binding for any non-immutable sampler if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } 
// Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type) : storage_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = Image; if (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == type) storage_ = true; } void cvdescriptorset::ImageDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageDescriptor::CopyUpdate(const Descriptor *src) { auto image_view = static_cast<const ImageDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } cvdescriptorset::BufferDescriptor::BufferDescriptor(const VkDescriptorType type) : storage_(false), dynamic_(false), buffer_(VK_NULL_HANDLE), offset_(0), range_(0) { updated = false; descriptor_class = GeneralBuffer; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { dynamic_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type) { storage_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { dynamic_ = true; storage_ = true; } } void cvdescriptorset::BufferDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &buffer_info = update->pBufferInfo[index]; buffer_ = buffer_info.buffer; offset_ = buffer_info.offset; range_ = buffer_info.range; } void cvdescriptorset::BufferDescriptor::CopyUpdate(const Descriptor *src) { auto buff_desc = static_cast<const BufferDescriptor *>(src); updated = true; buffer_ = buff_desc->buffer_; offset_ = buff_desc->offset_; range_ = buff_desc->range_; } void cvdescriptorset::BufferDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto buffer_node = dev_data->GetBufferState(buffer_); if (buffer_node) dev_data->AddCommandBufferBindingBuffer(cb_node, buffer_node); } cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type) : buffer_view_(VK_NULL_HANDLE), storage_(false) { updated = false; descriptor_class = TexelBuffer; if (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == type) storage_ = true; } void cvdescriptorset::TexelDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; buffer_view_ = update->pTexelBufferView[index]; } void cvdescriptorset::TexelDescriptor::CopyUpdate(const Descriptor *src) { updated = true; buffer_view_ = static_cast<const TexelDescriptor *>(src)->buffer_view_; } void cvdescriptorset::TexelDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto bv_state = dev_data->GetBufferViewState(buffer_view_); if (bv_state) { dev_data->AddCommandBufferBindingBufferView(cb_node, bv_state); } } // This is a helper function that iterates over a set of Write and 
Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Validate[Write|Copy]Update functions. // If the update hits an issue for which the callback returns "true", meaning that the call down the chain should // be skipped, then true is returned. // If there is no issue with the update, then false is returned. bool CoreChecks::ValidateUpdateDescriptorSets(uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds, const char *func_name) { bool skip = false; // Validate Write updates for (uint32_t i = 0; i < write_count; i++) { auto dest_set = p_wds[i].dstSet; auto set_node = GetSetNode(dest_set); if (!set_node) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), kVUID_Core_DrawState_InvalidDescriptorSet, "Cannot call %s on %s that has not been allocated.", func_name, report_data->FormatHandle(dest_set).c_str()); } else { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(set_node, report_data, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), error_code, "%s failed write update validation for %s with error: %s.", func_name, report_data->FormatHandle(dest_set).c_str(), error_str.c_str()); } } } // Now validate copy updates for (uint32_t i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = GetSetNode(src_set); auto dst_node = GetSetNode(dst_set); // Object_tracker verifies that src & dest descriptor set are valid assert(src_node); assert(dst_node); std::string error_code; std::string error_str; if (!ValidateCopyUpdate(report_data, &p_cds[i], dst_node, src_node, func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dst_set), error_code, "%s failed copy update from %s to %s with error: %s.", func_name, report_data->FormatHandle(src_set).c_str(), report_data->FormatHandle(dst_set).c_str(), error_str.c_str()); } } return skip; } // This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Perform[Write|Copy]Update functions. // Prerequisite : ValidateUpdateDescriptorSets() should be called and return "false" prior to calling PerformUpdateDescriptorSets() // with the same set of updates. // This is split from the validate code to allow validation prior to calling down the chain, and then update after // calling down the chain. 
void cvdescriptorset::PerformUpdateDescriptorSets(CoreChecks *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds) { // Write updates first uint32_t i = 0; for (i = 0; i < write_count; ++i) { auto dest_set = p_wds[i].dstSet; auto set_node = dev_data->GetSetNode(dest_set); if (set_node) { set_node->PerformWriteUpdate(&p_wds[i]); } } // Now copy updates for (i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = dev_data->GetSetNode(src_set); auto dst_node = dev_data->GetSetNode(dst_set); if (src_node && dst_node) { dst_node->PerformCopyUpdate(&p_cds[i], src_node); } } } cvdescriptorset::DecodedTemplateUpdate::DecodedTemplateUpdate(CoreChecks *device_data, VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData, VkDescriptorSetLayout push_layout) { auto const &create_info = template_state->create_info; inline_infos.resize(create_info.descriptorUpdateEntryCount); // Make sure we have one if we need it desc_writes.reserve(create_info.descriptorUpdateEntryCount); // emplaced, so reserved without initialization VkDescriptorSetLayout effective_dsl = create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET ? create_info.descriptorSetLayout : push_layout; auto layout_obj = GetDescriptorSetLayout(device_data, effective_dsl); // Create a WriteDescriptorSet struct for each template update entry for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { auto binding_count = layout_obj->GetDescriptorCountFromBinding(create_info.pDescriptorUpdateEntries[i].dstBinding); auto binding_being_updated = create_info.pDescriptorUpdateEntries[i].dstBinding; auto dst_array_element = create_info.pDescriptorUpdateEntries[i].dstArrayElement; desc_writes.reserve(desc_writes.size() + create_info.pDescriptorUpdateEntries[i].descriptorCount); for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { desc_writes.emplace_back(); auto &write_entry = desc_writes.back(); size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; char *update_entry = (char *)(pData) + offset; if (dst_array_element >= binding_count) { dst_array_element = 0; binding_being_updated = layout_obj->GetNextValidBinding(binding_being_updated); } write_entry.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_entry.pNext = NULL; write_entry.dstSet = descriptorSet; write_entry.dstBinding = binding_being_updated; write_entry.dstArrayElement = dst_array_element; write_entry.descriptorCount = 1; write_entry.descriptorType = create_info.pDescriptorUpdateEntries[i].descriptorType; switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: write_entry.pImageInfo = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: write_entry.pBufferInfo = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: write_entry.pTexelBufferView = reinterpret_cast<VkBufferView *>(update_entry); break; case 
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { VkWriteDescriptorSetInlineUniformBlockEXT *inline_info = &inline_infos[i]; inline_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; inline_info->pNext = nullptr; inline_info->dataSize = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info->pData = update_entry; write_entry.pNext = inline_info; // descriptorCount must match the dataSize member of the VkWriteDescriptorSetInlineUniformBlockEXT structure write_entry.descriptorCount = inline_info->dataSize; // skip the rest of the array, they just represent bytes in the update j = create_info.pDescriptorUpdateEntries[i].descriptorCount; break; } default: assert(0); break; } dst_array_element++; } } } // These helper functions carry out the validate and record descriptor updates peformed via update templates. They decode // the templatized data and leverage the non-template UpdateDescriptor helper functions. bool CoreChecks::ValidateUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); return ValidateUpdateDescriptorSets(static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL, "vkUpdateDescriptorSetWithTemplate()"); } void CoreChecks::PerformUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); cvdescriptorset::PerformUpdateDescriptorSets(this, static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL); } std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const { std::string out; auto layout_handle = p_layout_->GetDescriptorSetLayout(); if (IsPushDescriptor()) { string_sprintf(&out, "Push Descriptors defined with VkDescriptorSetLayout %s", device_data_->report_data->FormatHandle(layout_handle).c_str()); } else { string_sprintf(&out, "VkDescriptorSet %s allocated with VkDescriptorSetLayout %s", device_data_->report_data->FormatHandle(set_).c_str(), device_data_->report_data->FormatHandle(layout_handle).c_str()); } return out; }; // Loop through the write updates to validate for a push descriptor set, ignoring dstSet bool cvdescriptorset::ValidatePushDescriptorsUpdate(const DescriptorSet *push_set, const debug_report_data *report_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, const char *func_name) { assert(push_set->IsPushDescriptor()); bool skip = false; for (uint32_t i = 0; i < write_count; i++) { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(push_set, report_data, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, HandleToUint64(push_set->GetDescriptorSetLayout()), error_code, "%s failed update validation: %s.", func_name, error_str.c_str()); } } return skip; } // For the given buffer, verify that its creation parameters are appropriate for the given type // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUsage(BUFFER_STATE const 
*buffer_node, VkDescriptorType type, std::string *error_code, std::string *error_msg) { // Verify that usage bits set correctly for given type auto usage = buffer_node->createInfo.usage; const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00334"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00335"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00330"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00331"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"; } break; default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "Buffer (" << buffer_node->buffer << ") with usage mask 0x" << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } return true; } // For buffer descriptor updates, verify the buffer usage and VkDescriptorBufferInfo struct which includes: // 1. buffer is valid // 2. buffer was created with correct usage flags // 3. offset is less than buffer size // 4. range is either VK_WHOLE_SIZE or falls in (0, (buffer size - offset)] // 5. 
range and offset are within the device's limits // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUpdate(CoreChecks *device_data, VkDescriptorBufferInfo const *buffer_info, VkDescriptorType type, const char *func_name, std::string *error_code, std::string *error_msg) { // First make sure that buffer is valid auto buffer_node = device_data->GetBufferState(buffer_info->buffer); // Any invalid buffer should already be caught by object_tracker assert(buffer_node); if (device_data->ValidateMemoryIsBoundToBuffer(buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329"; *error_msg = "No memory bound to buffer."; return false; } // Verify usage bits if (!ValidateBufferUsage(buffer_node, type, error_code, error_msg)) { // error_msg will have been updated by ValidateBufferUsage() return false; } // offset must be less than buffer size if (buffer_info->offset >= buffer_node->createInfo.size) { *error_code = "VUID-VkDescriptorBufferInfo-offset-00340"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo offset of " << buffer_info->offset << " is greater than or equal to buffer " << buffer_node->buffer << " size of " << buffer_node->createInfo.size; *error_msg = error_str.str(); return false; } if (buffer_info->range != VK_WHOLE_SIZE) { // Range must be VK_WHOLE_SIZE or > 0 if (!buffer_info->range) { *error_code = "VUID-VkDescriptorBufferInfo-range-00341"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is not VK_WHOLE_SIZE and is zero, which is not allowed."; *error_msg = error_str.str(); return false; } // Range must be VK_WHOLE_SIZE or <= (buffer size - offset) if (buffer_info->range > (buffer_node->createInfo.size - buffer_info->offset)) { *error_code = "VUID-VkDescriptorBufferInfo-range-00342"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than buffer size (" << buffer_node->createInfo.size << ") minus requested offset of " << buffer_info->offset; *error_msg = error_str.str(); return false; } } // Check buffer update sizes against device limits const auto &limits = device_data->phys_dev_props.limits; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type || VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { auto max_ub_range = limits.maxUniformBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type || VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { auto max_sb_range = limits.maxStorageBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range 
> max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } } return true; } // Verify that the contents of the update are ok, but don't perform actual update bool cvdescriptorset::VerifyCopyUpdateContents(CoreChecks *device_data, const VkCopyDescriptorSet *update, const DescriptorSet *src_set, VkDescriptorType type, uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { // Note : Repurposing some Write update error codes here as specific details aren't called out for copy updates like they are // for write updates switch (src_set->GetDescriptorFromGlobalIndex(index)->descriptor_class) { case PlainSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; if (!src_desc->IsImmutableSampler()) { auto update_sampler = static_cast<const SamplerDescriptor *>(src_desc)->GetSampler(); if (!ValidateSampler(update_sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case ImageSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_samp_desc = static_cast<const ImageSamplerDescriptor *>(src_desc); // First validate sampler if (!img_samp_desc->IsImmutableSampler()) { auto update_sampler = img_samp_desc->GetSampler(); if (!ValidateSampler(update_sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } // Validate image auto image_view = img_samp_desc->GetImageView(); auto image_layout = img_samp_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, image_layout, type, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case Image: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_desc = static_cast<const ImageDescriptor *>(src_desc); auto image_view = img_desc->GetImageView(); auto image_layout = img_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, 
image_layout, type, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case TexelBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer_view = static_cast<const TexelDescriptor *>(src_desc)->GetBufferView(); auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor with invalid buffer view: " << buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; if (!ValidateBufferUsage(device_data->GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case GeneralBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer = static_cast<const BufferDescriptor *>(src_desc)->GetBuffer(); if (!ValidateBufferUsage(device_data->GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case InlineUniform: case AccelerationStructure: break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; } // Update the common AllocateDescriptorSetsData void CoreChecks::UpdateAllocateDescriptorSetsData(const VkDescriptorSetAllocateInfo *p_alloc_info, cvdescriptorset::AllocateDescriptorSetsData *ds_data) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { ds_data->layout_nodes[i] = layout; // Count total descriptors required per type for (uint32_t j = 0; j < layout->GetBindingCount(); ++j) { const auto &binding_layout = layout->GetDescriptorSetLayoutBindingPtrFromIndex(j); uint32_t typeIndex = static_cast<uint32_t>(binding_layout->descriptorType); ds_data->required_descriptors_by_type[typeIndex] += binding_layout->descriptorCount; } } // Any unknown layouts will be flagged as errors during ValidateAllocateDescriptorSets() call } } // Verify that the state at allocate time is correct, but don't actually allocate the sets yet bool CoreChecks::ValidateAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { bool skip = false; auto pool_state = GetDescriptorPoolState(p_alloc_info->descriptorPool); for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { // nullptr layout indicates no valid layout handle for this device, validated/logged in object_tracker if (layout->IsPushDescriptor()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 
HandleToUint64(p_alloc_info->pSetLayouts[i]), "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308", "%s specified at pSetLayouts[%" PRIu32 "] in vkAllocateDescriptorSets() was created with invalid flag %s set.", report_data->FormatHandle(p_alloc_info->pSetLayouts[i]).c_str(), i, "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR"); } if (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT && !(pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044", "Descriptor set layout create flags and pool create flags mismatch for index (%d)", i); } } } if (!device_extensions.vk_khr_maintenance1) { // Track number of descriptorSets allowable in this pool if (pool_state->availableSets < p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306", "Unable to allocate %u descriptorSets from %s" ". This pool only has %d descriptorSets remaining.", p_alloc_info->descriptorSetCount, report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableSets); } // Determine whether descriptor counts are satisfiable for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { if (ds_data->required_descriptors_by_type.at(it->first) > pool_state->availableDescriptorTypeCount[it->first]) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307", "Unable to allocate %u descriptors of type %s from %s" ". 
This pool only has %d descriptors of this type remaining.", ds_data->required_descriptors_by_type.at(it->first), string_VkDescriptorType(VkDescriptorType(it->first)), report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableDescriptorTypeCount[it->first]); } } } const auto *count_allocate_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); if (count_allocate_info) { if (count_allocate_info->descriptorSetCount != 0 && count_allocate_info->descriptorSetCount != p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-descriptorSetCount-03045", "VkDescriptorSetAllocateInfo::descriptorSetCount (%d) != " "VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::descriptorSetCount (%d)", p_alloc_info->descriptorSetCount, count_allocate_info->descriptorSetCount); } if (count_allocate_info->descriptorSetCount == p_alloc_info->descriptorSetCount) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (count_allocate_info->pDescriptorCounts[i] > layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046", "pDescriptorCounts[%d] = (%d), binding's descriptorCount = (%d)", i, count_allocate_info->pDescriptorCounts[i], layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())); } } } } return skip; } // Decrement allocated sets from the pool and insert new sets into set_map void CoreChecks::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const VkDescriptorSet *descriptor_sets, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { auto pool_state = descriptorPoolMap[p_alloc_info->descriptorPool].get(); // Account for sets and individual descriptors allocated from pool pool_state->availableSets -= p_alloc_info->descriptorSetCount; for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { pool_state->availableDescriptorTypeCount[it->first] -= ds_data->required_descriptors_by_type.at(it->first); } const auto *variable_count_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); bool variable_count_valid = variable_count_info && variable_count_info->descriptorSetCount == p_alloc_info->descriptorSetCount; // Create tracking object for each descriptor set; insert into global map and the pool's set. for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { uint32_t variable_count = variable_count_valid ? 
variable_count_info->pDescriptorCounts[i] : 0; std::unique_ptr<cvdescriptorset::DescriptorSet> new_ds(new cvdescriptorset::DescriptorSet( descriptor_sets[i], p_alloc_info->descriptorPool, ds_data->layout_nodes[i], variable_count, this)); pool_state->sets.insert(new_ds.get()); new_ds->in_use.store(0); setMap[descriptor_sets[i]] = std::move(new_ds); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, orig_map_, filtered_map_.get()); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, pipeline, orig_map_, filtered_map_.get()); } } // Starting at offset descriptor of given binding, parse over update_count // descriptor updates and verify that for any binding boundaries that are crossed, the next binding(s) are all consistent // Consistency means that their type, stage flags, and whether or not they use immutable samplers matches // If so, return true. If not, fill in error_msg and return false bool cvdescriptorset::VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator current_binding, uint32_t offset, uint32_t update_count, const char *type, const VkDescriptorSet set, std::string *error_msg) { // Verify consecutive bindings match (if needed) auto orig_binding = current_binding; // Track count of descriptors in the current_bindings that are remaining to be updated auto binding_remaining = current_binding.GetDescriptorCount(); // First, it's legal to offset beyond your own binding so handle that case // Really this is just searching for the binding in which the update begins and adjusting offset accordingly while (offset >= binding_remaining && !current_binding.AtEnd()) { // Advance to next binding, decrement offset by binding size offset -= binding_remaining; ++current_binding; binding_remaining = current_binding.GetDescriptorCount(); // Accessors are safe if AtEnd } assert(!current_binding.AtEnd()); // As written assumes range check has been made before calling binding_remaining -= offset; while (update_count > binding_remaining) { // While our updates overstep current binding // Verify next consecutive binding matches type, stage flags & immutable sampler use auto next_binding = current_binding.Next(); if (!current_binding.IsConsistent(next_binding)) { std::stringstream error_str; error_str << "Attempting " << type; if (current_binding.Layout()->IsPushDescriptor()) { error_str << " push descriptors"; } else { error_str << " descriptor set " << set; } error_str << " binding #" << orig_binding.Binding() << " with #" << update_count << " descriptors being updated but this update oversteps the bounds of this binding and the next binding is " "not consistent with current binding so this update is invalid."; *error_msg = error_str.str(); return false; } current_binding = next_binding; // For sake of this check consider the bindings updated and grab count for next binding update_count -= binding_remaining; binding_remaining = 
current_binding.GetDescriptorCount(); } return true; } // Validate the state for a given write update but don't actually perform the update // If an error would occur for this update, return false and fill in details in error_msg string bool cvdescriptorset::ValidateWriteUpdate(const DescriptorSet *dest_set, const debug_report_data *report_data, const VkWriteDescriptorSet *update, const char *func_name, std::string *error_code, std::string *error_msg) { const auto dest_layout = dest_set->GetLayout(); // Verify dst layout still valid if (dest_layout->IsDestroyed()) { *error_code = "VUID-VkWriteDescriptorSet-dstSet-00320"; string_sprintf(error_msg, "Cannot call %s to perform write update on %s which has been destroyed", func_name, dest_set->StringifySetAndLayout().c_str()); return false; } // Verify dst binding exists if (!dest_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00315"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " does not have binding " << update->dstBinding; *error_msg = error_str.str(); return false; } DescriptorSetLayout::ConstBindingIterator dest(dest_layout.get(), update->dstBinding); // Make sure binding isn't empty if (0 == dest.GetDescriptorCount()) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00316"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " cannot updated binding " << update->dstBinding << " that has 0 descriptors"; *error_msg = error_str.str(); return false; } // Verify idle ds if (dest_set->in_use.load() && !(dest.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { // TODO : Re-using Free Idle error code, need write update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform write update on " << dest_set->StringifySetAndLayout() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // We know that binding is valid, verify update and do update on each descriptor auto start_idx = dest.GetGlobalIndexRange().start + update->dstArrayElement; auto type = dest.GetType(); if (type != update->descriptorType) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00319"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(type) << " but update type is " << string_VkDescriptorType(update->descriptorType); *error_msg = error_str.str(); return false; } auto total_descriptors = dest_layout->GetTotalDescriptorCount(); if (update->descriptorCount > (total_descriptors - start_idx)) { *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << total_descriptors - start_idx << " descriptors in that binding and all successive bindings of the set, but update of " << update->descriptorCount << " descriptors combined with update array element offset of " << update->dstArrayElement << " oversteps the available number of consecutive descriptors"; *error_msg = error_str.str(); return false; } if (type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->dstArrayElement % 4) != 0) { *error_code = 
"VUID-VkWriteDescriptorSet-descriptorType-02219"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02220"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } const auto *write_inline_info = lvl_find_in_chain<VkWriteDescriptorSetInlineUniformBlockEXT>(update->pNext); if (!write_inline_info || write_inline_info->dataSize != update->descriptorCount) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02221"; std::stringstream error_str; if (!write_inline_info) { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT missing"; } else { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not equal to " << "VkWriteDescriptorSet descriptorCount " << update->descriptorCount; } *error_msg = error_str.str(); return false; } // This error is probably unreachable due to the previous two errors if (write_inline_info && (write_inline_info->dataSize % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSetInlineUniformBlockEXT-dataSize-02222"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Verify consecutive bindings match (if needed) if (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dest_layout.get(), update->dstBinding), update->dstArrayElement, update->descriptorCount, "write update to", dest_set->GetSet(), error_msg)) { // TODO : Should break out "consecutive binding updates" language into valid usage statements *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; return false; } // Update is within bounds and consistent so last step is to validate update contents if (!VerifyWriteUpdateContents(dest_set, update, start_idx, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " failed with error message: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } // All checks passed, update is clean return true; } // Verify that the contents of the update are ok, but don't perform actual update bool cvdescriptorset::VerifyWriteUpdateContents(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { auto *device_data = dest_set->GetDeviceData(); switch (update->descriptorType) { case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { // Validate image auto image_view = 
update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } if (device_data->device_extensions.vk_khr_sampler_ycbcr_conversion) { ImageSamplerDescriptor *desc = (ImageSamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (desc->IsImmutableSampler()) { auto sampler_state = device_data->GetSamplerState(desc->GetSampler()); auto iv_state = device_data->GetImageViewState(image_view); if (iv_state && sampler_state) { if (iv_state->samplerConversion != sampler_state->samplerConversion) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01948"; std::stringstream error_str; error_str << "Attempted write update to combined image sampler and image view and sampler ycbcr " "conversions are not identical, sampler: " << desc->GetSampler() << " image view: " << iv_state->image_view << "."; *error_msg = error_str.str(); return false; } } } else { auto iv_state = device_data->GetImageViewState(image_view); if (iv_state && (iv_state->samplerConversion != VK_NULL_HANDLE)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01947"; std::stringstream error_str; error_str << "Because dstSet (" << update->dstSet << ") is bound to image view (" << iv_state->image_view << ") that includes a YCBCR conversion, it must have been allocated with a layout that " "includes an immutable sampler."; *error_msg = error_str.str(); return false; } } } } } // fall through case VK_DESCRIPTOR_TYPE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { SamplerDescriptor *desc = (SamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (!desc->IsImmutableSampler()) { if (!ValidateSampler(update->pImageInfo[di].sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted write update to sampler descriptor with invalid sampler: " << update->pImageInfo[di].sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto buffer_view = update->pTexelBufferView[di]; auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor with invalid buffer view: " << buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; auto buffer_state = 
device_data->GetBufferState(buffer); // Verify that buffer underlying the view hasn't been destroyed prematurely if (!buffer_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed because underlying buffer (" << buffer << ") has been destroyed: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } else if (!ValidateBufferUsage(buffer_state, update->descriptorType, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { if (!ValidateBufferUpdate(device_data, update->pBufferInfo + di, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: // XXX TODO break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; }
1
11,096
The point of the exercise has been to eliminate CoreChecks as an object dependency for DescriptorSet et al.
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -34,7 +34,7 @@ import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING;
 
 /**
  * Context object with optional arguments for a Flink Scan.
  */
-class ScanContext implements Serializable {
+public class ScanContext implements Serializable {
 
   private static final long serialVersionUID = 1L;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.flink.source; import java.io.Serializable; import java.time.Duration; import java.util.List; import java.util.Map; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.ConfigOptions; import org.apache.flink.configuration.Configuration; import org.apache.iceberg.Schema; import org.apache.iceberg.expressions.Expression; import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING; /** * Context object with optional arguments for a Flink Scan. */ class ScanContext implements Serializable { private static final long serialVersionUID = 1L; private static final ConfigOption<Long> SNAPSHOT_ID = ConfigOptions.key("snapshot-id").longType().defaultValue(null); private static final ConfigOption<Boolean> CASE_SENSITIVE = ConfigOptions.key("case-sensitive").booleanType().defaultValue(false); private static final ConfigOption<Long> AS_OF_TIMESTAMP = ConfigOptions.key("as-of-timestamp").longType().defaultValue(null); private static final ConfigOption<Long> START_SNAPSHOT_ID = ConfigOptions.key("start-snapshot-id").longType().defaultValue(null); private static final ConfigOption<Long> END_SNAPSHOT_ID = ConfigOptions.key("end-snapshot-id").longType().defaultValue(null); private static final ConfigOption<Long> SPLIT_SIZE = ConfigOptions.key("split-size").longType().defaultValue(null); private static final ConfigOption<Integer> SPLIT_LOOKBACK = ConfigOptions.key("split-lookback").intType().defaultValue(null); private static final ConfigOption<Long> SPLIT_FILE_OPEN_COST = ConfigOptions.key("split-file-open-cost").longType().defaultValue(null); private static final ConfigOption<Boolean> STREAMING = ConfigOptions.key("streaming").booleanType().defaultValue(false); private static final ConfigOption<Duration> MONITOR_INTERVAL = ConfigOptions.key("monitor-interval").durationType().defaultValue(Duration.ofSeconds(10)); private final boolean caseSensitive; private final Long snapshotId; private final Long startSnapshotId; private final Long endSnapshotId; private final Long asOfTimestamp; private final Long splitSize; private final Integer splitLookback; private final Long splitOpenFileCost; private final boolean isStreaming; private final Duration monitorInterval; private final String nameMapping; private final Schema schema; private final List<Expression> filters; private final long limit; private ScanContext(boolean caseSensitive, Long snapshotId, Long startSnapshotId, Long endSnapshotId, Long asOfTimestamp, Long splitSize, Integer splitLookback, Long splitOpenFileCost, boolean isStreaming, Duration monitorInterval, String nameMapping, Schema schema, List<Expression> filters, long limit) { this.caseSensitive = caseSensitive; this.snapshotId = snapshotId; 
this.startSnapshotId = startSnapshotId; this.endSnapshotId = endSnapshotId; this.asOfTimestamp = asOfTimestamp; this.splitSize = splitSize; this.splitLookback = splitLookback; this.splitOpenFileCost = splitOpenFileCost; this.isStreaming = isStreaming; this.monitorInterval = monitorInterval; this.nameMapping = nameMapping; this.schema = schema; this.filters = filters; this.limit = limit; } boolean caseSensitive() { return caseSensitive; } Long snapshotId() { return snapshotId; } Long startSnapshotId() { return startSnapshotId; } Long endSnapshotId() { return endSnapshotId; } Long asOfTimestamp() { return asOfTimestamp; } Long splitSize() { return splitSize; } Integer splitLookback() { return splitLookback; } Long splitOpenFileCost() { return splitOpenFileCost; } boolean isStreaming() { return isStreaming; } Duration monitorInterval() { return monitorInterval; } String nameMapping() { return nameMapping; } Schema project() { return schema; } List<Expression> filters() { return filters; } long limit() { return limit; } ScanContext copyWithAppendsBetween(long newStartSnapshotId, long newEndSnapshotId) { return ScanContext.builder() .caseSensitive(caseSensitive) .useSnapshotId(null) .startSnapshotId(newStartSnapshotId) .endSnapshotId(newEndSnapshotId) .asOfTimestamp(null) .splitSize(splitSize) .splitLookback(splitLookback) .splitOpenFileCost(splitOpenFileCost) .streaming(isStreaming) .monitorInterval(monitorInterval) .nameMapping(nameMapping) .project(schema) .filters(filters) .limit(limit) .build(); } ScanContext copyWithSnapshotId(long newSnapshotId) { return ScanContext.builder() .caseSensitive(caseSensitive) .useSnapshotId(newSnapshotId) .startSnapshotId(null) .endSnapshotId(null) .asOfTimestamp(null) .splitSize(splitSize) .splitLookback(splitLookback) .splitOpenFileCost(splitOpenFileCost) .streaming(isStreaming) .monitorInterval(monitorInterval) .nameMapping(nameMapping) .project(schema) .filters(filters) .limit(limit) .build(); } static Builder builder() { return new Builder(); } static class Builder { private boolean caseSensitive = CASE_SENSITIVE.defaultValue(); private Long snapshotId = SNAPSHOT_ID.defaultValue(); private Long startSnapshotId = START_SNAPSHOT_ID.defaultValue(); private Long endSnapshotId = END_SNAPSHOT_ID.defaultValue(); private Long asOfTimestamp = AS_OF_TIMESTAMP.defaultValue(); private Long splitSize = SPLIT_SIZE.defaultValue(); private Integer splitLookback = SPLIT_LOOKBACK.defaultValue(); private Long splitOpenFileCost = SPLIT_FILE_OPEN_COST.defaultValue(); private boolean isStreaming = STREAMING.defaultValue(); private Duration monitorInterval = MONITOR_INTERVAL.defaultValue(); private String nameMapping; private Schema projectedSchema; private List<Expression> filters; private long limit = -1L; private Builder() { } Builder caseSensitive(boolean newCaseSensitive) { this.caseSensitive = newCaseSensitive; return this; } Builder useSnapshotId(Long newSnapshotId) { this.snapshotId = newSnapshotId; return this; } Builder startSnapshotId(Long newStartSnapshotId) { this.startSnapshotId = newStartSnapshotId; return this; } Builder endSnapshotId(Long newEndSnapshotId) { this.endSnapshotId = newEndSnapshotId; return this; } Builder asOfTimestamp(Long newAsOfTimestamp) { this.asOfTimestamp = newAsOfTimestamp; return this; } Builder splitSize(Long newSplitSize) { this.splitSize = newSplitSize; return this; } Builder splitLookback(Integer newSplitLookback) { this.splitLookback = newSplitLookback; return this; } Builder splitOpenFileCost(Long newSplitOpenFileCost) { 
this.splitOpenFileCost = newSplitOpenFileCost; return this; } Builder streaming(boolean streaming) { this.isStreaming = streaming; return this; } Builder monitorInterval(Duration newMonitorInterval) { this.monitorInterval = newMonitorInterval; return this; } Builder nameMapping(String newNameMapping) { this.nameMapping = newNameMapping; return this; } Builder project(Schema newProjectedSchema) { this.projectedSchema = newProjectedSchema; return this; } Builder filters(List<Expression> newFilters) { this.filters = newFilters; return this; } Builder limit(long newLimit) { this.limit = newLimit; return this; } Builder fromProperties(Map<String, String> properties) { Configuration config = new Configuration(); properties.forEach(config::setString); return this.useSnapshotId(config.get(SNAPSHOT_ID)) .caseSensitive(config.get(CASE_SENSITIVE)) .asOfTimestamp(config.get(AS_OF_TIMESTAMP)) .startSnapshotId(config.get(START_SNAPSHOT_ID)) .endSnapshotId(config.get(END_SNAPSHOT_ID)) .splitSize(config.get(SPLIT_SIZE)) .splitLookback(config.get(SPLIT_LOOKBACK)) .splitOpenFileCost(config.get(SPLIT_FILE_OPEN_COST)) .streaming(config.get(STREAMING)) .monitorInterval(config.get(MONITOR_INTERVAL)) .nameMapping(properties.get(DEFAULT_NAME_MAPPING)); } public ScanContext build() { return new ScanContext(caseSensitive, snapshotId, startSnapshotId, endSnapshotId, asOfTimestamp, splitSize, splitLookback, splitOpenFileCost, isStreaming, monitorInterval, nameMapping, projectedSchema, filters, limit); } } }
1
34,629
Why do we need to make so much more of this ScanContext public?
apache-iceberg
java
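The review comment above questions how much of ScanContext really needs to be exposed. As a point of reference, here is a minimal, hypothetical sketch of what construction from outside the org.apache.iceberg.flink.source package could look like if the builder were opened up as well; note that the patch only makes the class itself public, and every name in this sketch other than ScanContext's own builder methods is an assumption for illustration.

import java.time.Duration;
import org.apache.iceberg.flink.source.ScanContext;

// Hypothetical caller living in another package. This only compiles if
// ScanContext.builder() and the accessors are opened up too; the patch above
// makes the class public but leaves most of its members package-private.
public class ScanContextUsageSketch {
  public static void main(String[] args) {
    ScanContext context = ScanContext.builder()
        .caseSensitive(true)
        .startSnapshotId(100L)      // boundaries for an incremental scan
        .endSnapshotId(200L)
        .streaming(true)
        .monitorInterval(Duration.ofSeconds(30))
        .limit(1_000L)
        .build();

    System.out.println("streaming scan context built: " + context.isStreaming());
  }
}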
@@ -54,7 +54,6 @@ class ProductFilterFormType extends AbstractType
                 'choices' => $config->getFlagChoices(),
                 'choice_label' => 'name',
                 'choice_value' => 'id',
-                'choice_name' => 'id',
                 'multiple' => true,
                 'expanded' => true,
             ])
1
<?php

namespace Shopsys\ShopBundle\Form\Front\Product;

use Shopsys\FrameworkBundle\Component\Money\Money;
use Shopsys\FrameworkBundle\Form\Constraints\NotNegativeMoneyAmount;
use Shopsys\FrameworkBundle\Model\Product\Filter\ProductFilterConfig;
use Shopsys\FrameworkBundle\Model\Product\Filter\ProductFilterData;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\Extension\Core\Type\CheckboxType;
use Symfony\Component\Form\Extension\Core\Type\ChoiceType;
use Symfony\Component\Form\Extension\Core\Type\MoneyType;
use Symfony\Component\Form\Extension\Core\Type\SubmitType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\OptionsResolver\OptionsResolver;

class ProductFilterFormType extends AbstractType
{
    /**
     * @param \Symfony\Component\Form\FormBuilderInterface $builder
     * @param array $options
     */
    public function buildForm(FormBuilderInterface $builder, array $options)
    {
        /** @var \Shopsys\FrameworkBundle\Model\Product\Filter\ProductFilterConfig $config */
        $config = $options['product_filter_config'];

        $moneyBuilder = $builder->create('money', MoneyType::class);

        $builder
            ->add('minimalPrice', MoneyType::class, [
                'required' => false,
                'attr' => ['placeholder' => $this->transformMoneyToView($config->getPriceRange()->getMinimalPrice(), $moneyBuilder)],
                'invalid_message' => 'Please enter price in correct format (positive number with decimal separator)',
                'constraints' => [
                    new NotNegativeMoneyAmount(['message' => 'Price must be greater or equal to zero']),
                ],
            ])
            ->add('maximalPrice', MoneyType::class, [
                'required' => false,
                'attr' => ['placeholder' => $this->transformMoneyToView($config->getPriceRange()->getMaximalPrice(), $moneyBuilder)],
                'invalid_message' => 'Please enter price in correct format (positive number with decimal separator)',
                'constraints' => [
                    new NotNegativeMoneyAmount(['message' => 'Price must be greater or equal to zero']),
                ],
            ])
            ->add('parameters', ParameterFilterFormType::class, [
                'required' => false,
                'product_filter_config' => $config,
            ])
            ->add('inStock', CheckboxType::class, ['required' => false])
            ->add('flags', ChoiceType::class, [
                'required' => false,
                'choices' => $config->getFlagChoices(),
                'choice_label' => 'name',
                'choice_value' => 'id',
                'choice_name' => 'id',
                'multiple' => true,
                'expanded' => true,
            ])
            ->add('brands', ChoiceType::class, [
                'required' => false,
                'choices' => $config->getBrandChoices(),
                'choice_label' => 'name',
                'choice_value' => 'id',
                'choice_name' => 'id',
                'multiple' => true,
                'expanded' => true,
            ])
            ->add('search', SubmitType::class);
    }

    /**
     * @param \Symfony\Component\OptionsResolver\OptionsResolver $resolver
     */
    public function configureOptions(OptionsResolver $resolver)
    {
        $resolver
            ->setRequired('product_filter_config')
            ->setAllowedTypes('product_filter_config', ProductFilterConfig::class)
            ->setDefaults([
                'attr' => ['novalidate' => 'novalidate'],
                'data_class' => ProductFilterData::class,
                'method' => 'GET',
                'csrf_protection' => false,
            ]);
    }

    /**
     * @param \Shopsys\FrameworkBundle\Component\Money\Money $money
     * @param \Symfony\Component\Form\FormBuilderInterface $moneyBuilder
     * @return string
     */
    protected function transformMoneyToView(Money $money, FormBuilderInterface $moneyBuilder): string
    {
        foreach ($moneyBuilder->getModelTransformers() as $modelTransformer) {
            /** @var \Symfony\Component\Form\DataTransformerInterface $modelTransformer */
            $money = $modelTransformer->transform($money);
        }

        foreach ($moneyBuilder->getViewTransformers() as $viewTransformer) {
            /** @var \Symfony\Component\Form\DataTransformerInterface $viewTransformer */
            $money = $viewTransformer->transform($money);
        }

        return $money;
    }
}
1
15,399
Is `choice_name` not needed anymore?
shopsys-shopsys
php
@@ -226,6 +226,9 @@ public class Parquet {
       set("parquet.avro.write-old-list-structure", "false");
       MessageType type = ParquetSchemaUtil.convert(schema, name);
 
+      // Check that our metrics make sense
+      metricsConfig.validateProperties(schema);
+
       if (createWriterFunc != null) {
         Preconditions.checkArgument(writeSupport == null,
             "Cannot write with both write support and Parquet value writer");
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.parquet; import java.io.File; import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.FileFormat; import org.apache.iceberg.Files; import org.apache.iceberg.MetricsConfig; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.avro.AvroSchemaUtil; import org.apache.iceberg.data.parquet.GenericParquetWriter; import org.apache.iceberg.deletes.EqualityDeleteWriter; import org.apache.iceberg.deletes.PositionDeleteWriter; import org.apache.iceberg.encryption.EncryptionKeyMetadata; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.hadoop.HadoopOutputFile; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.DeleteSchemaUtil; import org.apache.iceberg.io.FileAppender; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.mapping.NameMapping; import org.apache.iceberg.parquet.ParquetValueWriters.PositionDeleteStructWriter; import org.apache.iceberg.parquet.ParquetValueWriters.StructWriter; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.util.ArrayUtil; import org.apache.parquet.HadoopReadOptions; import org.apache.parquet.ParquetReadOptions; import org.apache.parquet.avro.AvroReadSupport; import org.apache.parquet.avro.AvroWriteSupport; import org.apache.parquet.column.ParquetProperties; import org.apache.parquet.column.ParquetProperties.WriterVersion; import org.apache.parquet.hadoop.ParquetFileReader; import org.apache.parquet.hadoop.ParquetFileWriter; import org.apache.parquet.hadoop.ParquetReader; import org.apache.parquet.hadoop.ParquetWriter; import org.apache.parquet.hadoop.api.ReadSupport; import org.apache.parquet.hadoop.api.WriteSupport; import org.apache.parquet.hadoop.metadata.CompressionCodecName; import org.apache.parquet.schema.MessageType; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL; import 
static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT; public class Parquet { private Parquet() { } private static final Collection<String> READ_PROPERTIES_TO_REMOVE = Sets.newHashSet( "parquet.read.filter", "parquet.private.read.filter.predicate", "parquet.read.support.class"); public static WriteBuilder write(OutputFile file) { return new WriteBuilder(file); } public static class WriteBuilder { private final OutputFile file; private final Map<String, String> metadata = Maps.newLinkedHashMap(); private final Map<String, String> config = Maps.newLinkedHashMap(); private Schema schema = null; private String name = "table"; private WriteSupport<?> writeSupport = null; private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null; private MetricsConfig metricsConfig = MetricsConfig.getDefault(); private ParquetFileWriter.Mode writeMode = ParquetFileWriter.Mode.CREATE; private WriteBuilder(OutputFile file) { this.file = file; } public WriteBuilder forTable(Table table) { schema(table.schema()); setAll(table.properties()); metricsConfig(MetricsConfig.fromProperties(table.properties())); return this; } public WriteBuilder schema(Schema newSchema) { this.schema = newSchema; return this; } public WriteBuilder named(String newName) { this.name = newName; return this; } public WriteBuilder writeSupport(WriteSupport<?> newWriteSupport) { this.writeSupport = newWriteSupport; return this; } public WriteBuilder set(String property, String value) { config.put(property, value); return this; } public WriteBuilder setAll(Map<String, String> properties) { config.putAll(properties); return this; } public WriteBuilder meta(String property, String value) { metadata.put(property, value); return this; } public WriteBuilder createWriterFunc(Function<MessageType, ParquetValueWriter<?>> newCreateWriterFunc) { this.createWriterFunc = newCreateWriterFunc; return this; } public WriteBuilder metricsConfig(MetricsConfig newMetricsConfig) { this.metricsConfig = newMetricsConfig; return this; } public WriteBuilder overwrite() { return overwrite(true); } public WriteBuilder overwrite(boolean enabled) { this.writeMode = enabled ? 
ParquetFileWriter.Mode.OVERWRITE : ParquetFileWriter.Mode.CREATE; return this; } @SuppressWarnings("unchecked") private <T> WriteSupport<T> getWriteSupport(MessageType type) { if (writeSupport != null) { return (WriteSupport<T>) writeSupport; } else { return new AvroWriteSupport<>( type, ParquetAvro.parquetAvroSchema(AvroSchemaUtil.convert(schema, name)), ParquetAvro.DEFAULT_MODEL); } } private CompressionCodecName codec() { String codec = config.getOrDefault(PARQUET_COMPRESSION, PARQUET_COMPRESSION_DEFAULT); try { return CompressionCodecName.valueOf(codec.toUpperCase(Locale.ENGLISH)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Unsupported compression codec: " + codec); } } public <D> FileAppender<D> build() throws IOException { Preconditions.checkNotNull(schema, "Schema is required"); Preconditions.checkNotNull(name, "Table name is required and cannot be null"); // add the Iceberg schema to keyValueMetadata meta("iceberg.schema", SchemaParser.toJson(schema)); // Map Iceberg properties to pass down to the Parquet writer int rowGroupSize = Integer.parseInt(config.getOrDefault( PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT)); int pageSize = Integer.parseInt(config.getOrDefault( PARQUET_PAGE_SIZE_BYTES, PARQUET_PAGE_SIZE_BYTES_DEFAULT)); int dictionaryPageSize = Integer.parseInt(config.getOrDefault( PARQUET_DICT_SIZE_BYTES, PARQUET_DICT_SIZE_BYTES_DEFAULT)); String compressionLevel = config.getOrDefault( PARQUET_COMPRESSION_LEVEL, PARQUET_COMPRESSION_LEVEL_DEFAULT); if (compressionLevel != null) { switch (codec()) { case GZIP: config.put("zlib.compress.level", compressionLevel); break; case BROTLI: config.put("compression.brotli.quality", compressionLevel); break; case ZSTD: config.put("io.compression.codec.zstd.level", compressionLevel); break; default: // compression level is not supported; ignore it } } WriterVersion writerVersion = WriterVersion.PARQUET_1_0; set("parquet.avro.write-old-list-structure", "false"); MessageType type = ParquetSchemaUtil.convert(schema, name); if (createWriterFunc != null) { Preconditions.checkArgument(writeSupport == null, "Cannot write with both write support and Parquet value writer"); Configuration conf; if (file instanceof HadoopOutputFile) { conf = ((HadoopOutputFile) file).getConf(); } else { conf = new Configuration(); } for (Map.Entry<String, String> entry : config.entrySet()) { conf.set(entry.getKey(), entry.getValue()); } ParquetProperties parquetProperties = ParquetProperties.builder() .withWriterVersion(writerVersion) .withPageSize(pageSize) .withDictionaryPageSize(dictionaryPageSize) .build(); return new org.apache.iceberg.parquet.ParquetWriter<>( conf, file, schema, rowGroupSize, metadata, createWriterFunc, codec(), parquetProperties, metricsConfig, writeMode); } else { return new ParquetWriteAdapter<>(new ParquetWriteBuilder<D>(ParquetIO.file(file)) .withWriterVersion(writerVersion) .setType(type) .setConfig(config) .setKeyValueMetadata(metadata) .setWriteSupport(getWriteSupport(type)) .withCompressionCodec(codec()) .withWriteMode(writeMode) .withRowGroupSize(rowGroupSize) .withPageSize(pageSize) .withDictionaryPageSize(dictionaryPageSize) .build(), metricsConfig); } } } public static DeleteWriteBuilder writeDeletes(OutputFile file) { return new DeleteWriteBuilder(file); } public static class DeleteWriteBuilder { private final WriteBuilder appenderBuilder; private final String location; private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null; private Schema rowSchema = 
null; private PartitionSpec spec = null; private StructLike partition = null; private EncryptionKeyMetadata keyMetadata = null; private int[] equalityFieldIds = null; private Function<CharSequence, ?> pathTransformFunc = Function.identity(); private DeleteWriteBuilder(OutputFile file) { this.appenderBuilder = write(file); this.location = file.location(); } public DeleteWriteBuilder forTable(Table table) { rowSchema(table.schema()); withSpec(table.spec()); setAll(table.properties()); metricsConfig(MetricsConfig.fromProperties(table.properties())); return this; } public DeleteWriteBuilder set(String property, String value) { appenderBuilder.set(property, value); return this; } public DeleteWriteBuilder setAll(Map<String, String> properties) { appenderBuilder.setAll(properties); return this; } public DeleteWriteBuilder meta(String property, String value) { appenderBuilder.meta(property, value); return this; } public DeleteWriteBuilder overwrite() { return overwrite(true); } public DeleteWriteBuilder overwrite(boolean enabled) { appenderBuilder.overwrite(enabled); return this; } public DeleteWriteBuilder metricsConfig(MetricsConfig newMetricsConfig) { // TODO: keep full metrics for position delete file columns appenderBuilder.metricsConfig(newMetricsConfig); return this; } public DeleteWriteBuilder createWriterFunc(Function<MessageType, ParquetValueWriter<?>> newCreateWriterFunc) { this.createWriterFunc = newCreateWriterFunc; return this; } public DeleteWriteBuilder rowSchema(Schema newSchema) { this.rowSchema = newSchema; return this; } public DeleteWriteBuilder withSpec(PartitionSpec newSpec) { this.spec = newSpec; return this; } public DeleteWriteBuilder withPartition(StructLike key) { this.partition = key; return this; } public DeleteWriteBuilder withKeyMetadata(EncryptionKeyMetadata metadata) { this.keyMetadata = metadata; return this; } public DeleteWriteBuilder equalityFieldIds(List<Integer> fieldIds) { this.equalityFieldIds = ArrayUtil.toIntArray(fieldIds); return this; } public DeleteWriteBuilder equalityFieldIds(int... 
fieldIds) { this.equalityFieldIds = fieldIds; return this; } public DeleteWriteBuilder transformPaths(Function<CharSequence, ?> newPathTransformFunc) { this.pathTransformFunc = newPathTransformFunc; return this; } public <T> EqualityDeleteWriter<T> buildEqualityWriter() throws IOException { Preconditions.checkState(rowSchema != null, "Cannot create equality delete file without a schema`"); Preconditions.checkState(equalityFieldIds != null, "Cannot create equality delete file without delete field ids"); Preconditions.checkState(createWriterFunc != null, "Cannot create equality delete file unless createWriterFunc is set"); meta("delete-type", "equality"); meta("delete-field-ids", IntStream.of(equalityFieldIds) .mapToObj(Objects::toString) .collect(Collectors.joining(", "))); // the appender uses the row schema without extra columns appenderBuilder.schema(rowSchema); appenderBuilder.createWriterFunc(createWriterFunc); return new EqualityDeleteWriter<>( appenderBuilder.build(), FileFormat.PARQUET, location, spec, partition, keyMetadata, equalityFieldIds); } public <T> PositionDeleteWriter<T> buildPositionWriter() throws IOException { Preconditions.checkState(equalityFieldIds == null, "Cannot create position delete file using delete field ids"); meta("delete-type", "position"); if (rowSchema != null && createWriterFunc != null) { // the appender uses the row schema wrapped with position fields appenderBuilder.schema(DeleteSchemaUtil.posDeleteSchema(rowSchema)); appenderBuilder.createWriterFunc(parquetSchema -> { ParquetValueWriter<?> writer = createWriterFunc.apply(parquetSchema); if (writer instanceof StructWriter) { return new PositionDeleteStructWriter<T>((StructWriter<?>) writer, pathTransformFunc); } else { throw new UnsupportedOperationException("Cannot wrap writer for position deletes: " + writer.getClass()); } }); } else { appenderBuilder.schema(DeleteSchemaUtil.pathPosSchema()); appenderBuilder.createWriterFunc(parquetSchema -> new PositionDeleteStructWriter<T>((StructWriter<?>) GenericParquetWriter.buildWriter(parquetSchema), Function.identity())); } return new PositionDeleteWriter<>( appenderBuilder.build(), FileFormat.PARQUET, location, spec, partition, keyMetadata); } } private static class ParquetWriteBuilder<T> extends ParquetWriter.Builder<T, ParquetWriteBuilder<T>> { private Map<String, String> keyValueMetadata = Maps.newHashMap(); private Map<String, String> config = Maps.newHashMap(); private MessageType type; private WriteSupport<T> writeSupport; private ParquetWriteBuilder(org.apache.parquet.io.OutputFile path) { super(path); } @Override protected ParquetWriteBuilder<T> self() { return this; } public ParquetWriteBuilder<T> setKeyValueMetadata(Map<String, String> keyValueMetadata) { this.keyValueMetadata = keyValueMetadata; return self(); } public ParquetWriteBuilder<T> setConfig(Map<String, String> config) { this.config = config; return self(); } public ParquetWriteBuilder<T> setType(MessageType type) { this.type = type; return self(); } public ParquetWriteBuilder<T> setWriteSupport(WriteSupport<T> writeSupport) { this.writeSupport = writeSupport; return self(); } @Override protected WriteSupport<T> getWriteSupport(Configuration configuration) { for (Map.Entry<String, String> entry : config.entrySet()) { configuration.set(entry.getKey(), entry.getValue()); } return new ParquetWriteSupport<>(type, keyValueMetadata, writeSupport); } } public static ReadBuilder read(InputFile file) { return new ReadBuilder(file); } public static class ReadBuilder { private final InputFile 
file; private final Map<String, String> properties = Maps.newHashMap(); private Long start = null; private Long length = null; private Schema schema = null; private Expression filter = null; private ReadSupport<?> readSupport = null; private Function<MessageType, VectorizedReader<?>> batchedReaderFunc = null; private Function<MessageType, ParquetValueReader<?>> readerFunc = null; private boolean filterRecords = true; private boolean caseSensitive = true; private boolean callInit = false; private boolean reuseContainers = false; private int maxRecordsPerBatch = 10000; private NameMapping nameMapping = null; private ReadBuilder(InputFile file) { this.file = file; } /** * Restricts the read to the given range: [start, start + length). * * @param newStart the start position for this read * @param newLength the length of the range this read should scan * @return this builder for method chaining */ public ReadBuilder split(long newStart, long newLength) { this.start = newStart; this.length = newLength; return this; } public ReadBuilder project(Schema newSchema) { this.schema = newSchema; return this; } public ReadBuilder caseInsensitive() { return caseSensitive(false); } public ReadBuilder caseSensitive(boolean newCaseSensitive) { this.caseSensitive = newCaseSensitive; return this; } public ReadBuilder filterRecords(boolean newFilterRecords) { this.filterRecords = newFilterRecords; return this; } public ReadBuilder filter(Expression newFilter) { this.filter = newFilter; return this; } public ReadBuilder readSupport(ReadSupport<?> newFilterSupport) { this.readSupport = newFilterSupport; return this; } public ReadBuilder createReaderFunc(Function<MessageType, ParquetValueReader<?>> newReaderFunction) { Preconditions.checkArgument(this.batchedReaderFunc == null, "Reader function cannot be set since the batched version is already set"); this.readerFunc = newReaderFunction; return this; } public ReadBuilder createBatchedReaderFunc(Function<MessageType, VectorizedReader<?>> func) { Preconditions.checkArgument(this.readerFunc == null, "Batched reader function cannot be set since the non-batched version is already set"); this.batchedReaderFunc = func; return this; } public ReadBuilder set(String key, String value) { properties.put(key, value); return this; } public ReadBuilder callInit() { this.callInit = true; return this; } public ReadBuilder reuseContainers() { this.reuseContainers = true; return this; } public ReadBuilder recordsPerBatch(int numRowsPerBatch) { this.maxRecordsPerBatch = numRowsPerBatch; return this; } public ReadBuilder withNameMapping(NameMapping newNameMapping) { this.nameMapping = newNameMapping; return this; } @SuppressWarnings({"unchecked", "checkstyle:CyclomaticComplexity"}) public <D> CloseableIterable<D> build() { if (readerFunc != null || batchedReaderFunc != null) { ParquetReadOptions.Builder optionsBuilder; if (file instanceof HadoopInputFile) { // remove read properties already set that may conflict with this read Configuration conf = new Configuration(((HadoopInputFile) file).getConf()); for (String property : READ_PROPERTIES_TO_REMOVE) { conf.unset(property); } optionsBuilder = HadoopReadOptions.builder(conf); } else { optionsBuilder = ParquetReadOptions.builder(); } for (Map.Entry<String, String> entry : properties.entrySet()) { optionsBuilder.set(entry.getKey(), entry.getValue()); } if (start != null) { optionsBuilder.withRange(start, start + length); } ParquetReadOptions options = optionsBuilder.build(); if (batchedReaderFunc != null) { return new 
VectorizedParquetReader<>(file, schema, options, batchedReaderFunc, nameMapping, filter, reuseContainers, caseSensitive, maxRecordsPerBatch); } else { return new org.apache.iceberg.parquet.ParquetReader<>( file, schema, options, readerFunc, nameMapping, filter, reuseContainers, caseSensitive); } } ParquetReadBuilder<D> builder = new ParquetReadBuilder<>(ParquetIO.file(file)); builder.project(schema); if (readSupport != null) { builder.readSupport((ReadSupport<D>) readSupport); } else { builder.readSupport(new AvroReadSupport<>(ParquetAvro.DEFAULT_MODEL)); } // default options for readers builder.set("parquet.strict.typing", "false") // allow type promotion .set("parquet.avro.compatible", "false") // use the new RecordReader with Utf8 support .set("parquet.avro.add-list-element-records", "false"); // assume that lists use a 3-level schema for (Map.Entry<String, String> entry : properties.entrySet()) { builder.set(entry.getKey(), entry.getValue()); } if (filter != null) { // TODO: should not need to get the schema to push down before opening the file. // Parquet should allow setting a filter inside its read support MessageType type; try (ParquetFileReader schemaReader = ParquetFileReader.open(ParquetIO.file(file))) { type = schemaReader.getFileMetaData().getSchema(); } catch (IOException e) { throw new RuntimeIOException(e); } Schema fileSchema = ParquetSchemaUtil.convert(type); builder.useStatsFilter() .useDictionaryFilter() .useRecordFilter(filterRecords) .withFilter(ParquetFilters.convert(fileSchema, filter, caseSensitive)); } else { // turn off filtering builder.useStatsFilter(false) .useDictionaryFilter(false) .useRecordFilter(false); } if (callInit) { builder.callInit(); } if (start != null) { builder.withFileRange(start, start + length); } if (nameMapping != null) { builder.withNameMapping(nameMapping); } return new ParquetIterable<>(builder); } } private static class ParquetReadBuilder<T> extends ParquetReader.Builder<T> { private Schema schema = null; private ReadSupport<T> readSupport = null; private boolean callInit = false; private NameMapping nameMapping = null; private ParquetReadBuilder(org.apache.parquet.io.InputFile file) { super(file); } public ParquetReadBuilder<T> project(Schema newSchema) { this.schema = newSchema; return this; } public ParquetReadBuilder<T> withNameMapping(NameMapping newNameMapping) { this.nameMapping = newNameMapping; return this; } public ParquetReadBuilder<T> readSupport(ReadSupport<T> newReadSupport) { this.readSupport = newReadSupport; return this; } public ParquetReadBuilder<T> callInit() { this.callInit = true; return this; } @Override protected ReadSupport<T> getReadSupport() { return new ParquetReadSupport<>(schema, readSupport, callInit, nameMapping); } } /** * Combines several files into one * * @param inputFiles an {@link Iterable} of parquet files. 
The order of iteration determines the order in which * content of files are read and written to the {@code outputFile} * @param outputFile the output parquet file containing all the data from {@code inputFiles} * @param rowGroupSize the row group size to use when writing the {@code outputFile} * @param schema the schema of the data * @param metadata extraMetadata to write at the footer of the {@code outputFile} */ public static void concat(Iterable<File> inputFiles, File outputFile, int rowGroupSize, Schema schema, Map<String, String> metadata) throws IOException { OutputFile file = Files.localOutput(outputFile); ParquetFileWriter writer = new ParquetFileWriter( ParquetIO.file(file), ParquetSchemaUtil.convert(schema, "table"), ParquetFileWriter.Mode.CREATE, rowGroupSize, 0); writer.start(); for (File inputFile : inputFiles) { writer.appendFile(ParquetIO.file(Files.localInput(inputFile))); } writer.end(metadata); } }
1
31,329
Is this the right place to do the validation? If a user adds a bad property or performs some schema update that causes a validation error, that would break all writes to the table. To me, it doesn't seem like we are catching the problem early enough and possibly allowing a typo to break scheduled jobs. What do you think about adding this validation when altering the table? `UpdateProperties` could check whether any properties starting with `write.metadata.metrics` were modified and run this. Similarly, `UpdateSchema` could run this as well, although I think that we should probably modify `UpdateSchema` to simply update the properties for column renames (if that's easily done).
apache-iceberg
java
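To make the suggestion in the review comment above concrete, here is a minimal, hypothetical sketch of running the metrics validation when table properties are updated rather than only at write time. The write.metadata.metrics prefix and the validateProperties(Schema) call come from the comment and the patch; the surrounding class, method, and parameter names are assumptions for illustration and do not reflect the actual Iceberg UpdateProperties implementation.

import java.util.Map;
import java.util.Set;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.Schema;

// Illustrative only: the shape of an eager check at ALTER time, so a typo in a
// metrics property fails the table update instead of breaking later write jobs.
public class MetricsPropertyValidationSketch {

  private static final String METRICS_PREFIX = "write.metadata.metrics";

  /**
   * @param changedKeys      property keys set or removed by the pending update
   * @param mergedProperties table properties after applying the update
   * @param schema           current table schema
   */
  public static void validateIfMetricsChanged(Set<String> changedKeys,
                                              Map<String, String> mergedProperties,
                                              Schema schema) {
    boolean touchesMetrics = changedKeys.stream()
        .anyMatch(key -> key.startsWith(METRICS_PREFIX));

    if (touchesMetrics) {
      // validateProperties(Schema) is the method the patch above adds to
      // MetricsConfig; it is assumed to throw for unknown columns or
      // malformed metrics modes.
      MetricsConfig.fromProperties(mergedProperties).validateProperties(schema);
    }
  }
}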
@@ -2467,11 +2467,17 @@ hipError_t hipHccGetAcceleratorView(hipStream_t stream, hc::accelerator_view** a
 namespace hip_impl {
 std::vector<hsa_agent_t> all_hsa_agents() {
     std::vector<hsa_agent_t> r{};
-    for (auto&& acc : hc::accelerator::get_all()) {
+    auto accelerators = hc::accelerator::get_all();
+    for (int i = 0; i < accelerators.size(); i++) {
+        auto&& acc = accelerators[i];
         const auto agent = acc.get_hsa_agent();
 
         if (!agent || !acc.is_hsa_accelerator()) continue;
 
+        // If device is not in visible devices list, ignore
+        if (std::find(g_hip_visible_devices.begin(), g_hip_visible_devices.end(), (i - 1)) ==
+            g_hip_visible_devices.end()) continue;
+
         r.emplace_back(*static_cast<hsa_agent_t*>(agent));
     }
 
1
/* Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * @file hip_hcc.cpp * * Contains definitions for functions that are large enough that we don't want to inline them * everywhere. This file is compiled and linked into apps running HIP / HCC path. */ #include <assert.h> #include <exception> #include <stdint.h> #include <iostream> #include <sstream> #include <list> #include <sys/types.h> #include <unistd.h> #include <deque> #include <vector> #include <algorithm> #include <atomic> #include <mutex> #include <hc.hpp> #include <hc_am.hpp> #include "hsa/hsa_ext_amd.h" #include "hip/hip_runtime.h" #include "hip_hcc_internal.h" #include "trace_helper.h" #include "env.h" // TODO - create a stream-based debug interface as an additional option for tprintf #define DB_PEER_CTX 0 //================================================================================================= // Global variables: //================================================================================================= const int release = 1; const char* API_COLOR = KGRN; const char* API_COLOR_END = KNRM; int HIP_LAUNCH_BLOCKING = 0; std::string HIP_LAUNCH_BLOCKING_KERNELS; std::vector<std::string> g_hipLaunchBlockingKernels; int HIP_API_BLOCKING = 0; int HIP_PRINT_ENV = 0; int HIP_TRACE_API = 0; std::string HIP_TRACE_API_COLOR("green"); int HIP_PROFILE_API = 0; // TODO - DB_START/STOP need more testing. std::string HIP_DB_START_API; std::string HIP_DB_STOP_API; int HIP_DB = 0; int HIP_VISIBLE_DEVICES = 0; int HIP_WAIT_MODE = 0; int HIP_FORCE_P2P_HOST = 0; int HIP_FAIL_SOC = 0; int HIP_DENY_PEER_ACCESS = 0; int HIP_HIDDEN_FREE_MEM = 256; // Force async copies to actually use the synchronous copy interface. int HIP_FORCE_SYNC_COPY = 0; // TODO - set these to 0 and 1 int HIP_EVENT_SYS_RELEASE = 0; int HIP_HOST_COHERENT = 1; int HIP_SYNC_HOST_ALLOC = 1; int HIP_INIT_ALLOC = -1; int HIP_SYNC_STREAM_WAIT = 0; int HIP_FORCE_NULL_STREAM = 0; int HIP_DUMP_CODE_OBJECT = 0; #if (__hcc_workweek__ >= 17300) // Make sure we have required bug fix in HCC // Perform resolution on the GPU: // Chicken bit to sync on host to implement null stream. 
// If 0, null stream synchronization is performed on the GPU int HIP_SYNC_NULL_STREAM = 0; #else int HIP_SYNC_NULL_STREAM = 1; #endif // HIP needs to change some behavior based on HCC_OPT_FLUSH : #if (__hcc_workweek__ >= 17296) int HCC_OPT_FLUSH = 1; #else #warning "HIP disabled HCC_OPT_FLUSH since HCC version does not yet support" int HCC_OPT_FLUSH = 0; #endif // Array of pointers to devices. ihipDevice_t** g_deviceArray; bool g_visible_device = false; unsigned g_deviceCnt; std::vector<int> g_hip_visible_devices; hsa_agent_t g_cpu_agent; hsa_agent_t* g_allAgents; // CPU agents + all the visible GPU agents. unsigned g_numLogicalThreads; std::atomic<int> g_lastShortTid(1); // Indexed by short-tid: // std::vector<ProfTrigger> g_dbStartTriggers; std::vector<ProfTrigger> g_dbStopTriggers; //================================================================================================= // Thread-local storage: //================================================================================================= // This is the implicit context used by all HIP commands. // It can be set by hipSetDevice or by the CTX manipulation commands: thread_local hipError_t tls_lastHipError = hipSuccess; thread_local TidInfo tls_tidInfo; //================================================================================================= // Top-level "free" functions: //================================================================================================= uint64_t recordApiTrace(std::string* fullStr, const std::string& apiStr) { auto apiSeqNum = tls_tidInfo.apiSeqNum(); auto tid = tls_tidInfo.tid(); if ((tid < g_dbStartTriggers.size()) && (apiSeqNum >= g_dbStartTriggers[tid].nextTrigger())) { printf("info: resume profiling at %lu\n", apiSeqNum); RESUME_PROFILING; g_dbStartTriggers.pop_back(); }; if ((tid < g_dbStopTriggers.size()) && (apiSeqNum >= g_dbStopTriggers[tid].nextTrigger())) { printf("info: stop profiling at %lu\n", apiSeqNum); STOP_PROFILING; g_dbStopTriggers.pop_back(); }; fullStr->reserve(16 + apiStr.length()); *fullStr = std::to_string(tid) + "."; *fullStr += std::to_string(apiSeqNum); *fullStr += " "; *fullStr += apiStr; uint64_t apiStartTick = getTicks(); if (COMPILE_HIP_DB && HIP_TRACE_API) { fprintf(stderr, "%s<<hip-api pid:%d tid:%s @%lu%s\n", API_COLOR, tls_tidInfo.pid(), fullStr->c_str(), apiStartTick, API_COLOR_END); } return apiStartTick; } static inline bool ihipIsValidDevice(unsigned deviceIndex) { // deviceIndex is unsigned so always > 0 return (deviceIndex < g_deviceCnt); } ihipDevice_t* ihipGetDevice(int deviceIndex) { if (ihipIsValidDevice(deviceIndex)) { return g_deviceArray[deviceIndex]; } else { return NULL; } } ihipCtx_t* ihipGetPrimaryCtx(unsigned deviceIndex) { ihipDevice_t* device = ihipGetDevice(deviceIndex); return device ? device->getPrimaryCtx() : NULL; }; static thread_local ihipCtx_t* tls_defaultCtx = nullptr; void ihipSetTlsDefaultCtx(ihipCtx_t* ctx) { tls_defaultCtx = ctx; } //--- // TODO - review the context creation strategy here. Really should be: // - first "non-device" runtime call creates the context for this thread. Allowed to call // setDevice first. // - hipDeviceReset destroys the primary context for device? // - Then context is created again for next usage. 
ihipCtx_t* ihipGetTlsDefaultCtx() { // Per-thread initialization of the TLS: if ((tls_defaultCtx == nullptr) && (g_deviceCnt > 0)) { ihipSetTlsDefaultCtx(ihipGetPrimaryCtx(0)); } return tls_defaultCtx; } hipError_t ihipSynchronize(void) { ihipGetTlsDefaultCtx()->locked_waitAllStreams(); // ignores non-blocking streams, this waits // for all activity to finish. return (hipSuccess); } //================================================================================================= // ihipStream_t: //================================================================================================= TidInfo::TidInfo() : _apiSeqNum(0) { _shortTid = g_lastShortTid.fetch_add(1); _pid = getpid(); if (COMPILE_HIP_DB && HIP_TRACE_API) { std::stringstream tid_ss; std::stringstream tid_ss_num; tid_ss_num << std::this_thread::get_id(); tid_ss << std::hex << std::stoull(tid_ss_num.str()); tprintf(DB_API, "HIP initialized short_tid#%d (maps to full_tid: 0x%s)\n", _shortTid, tid_ss.str().c_str()); }; } //================================================================================================= // ihipStream_t: //================================================================================================= //--- ihipStream_t::ihipStream_t(ihipCtx_t* ctx, hc::accelerator_view av, unsigned int flags) : _id(0), // will be set by add function. _flags(flags), _ctx(ctx), _criticalData(this, av) { unsigned schedBits = ctx->_ctxFlags & hipDeviceScheduleMask; switch (schedBits) { case hipDeviceScheduleAuto: _scheduleMode = Auto; break; case hipDeviceScheduleSpin: _scheduleMode = Spin; break; case hipDeviceScheduleYield: _scheduleMode = Yield; break; case hipDeviceScheduleBlockingSync: _scheduleMode = Yield; break; default: _scheduleMode = Auto; }; }; //--- ihipStream_t::~ihipStream_t() {} hc::hcWaitMode ihipStream_t::waitMode() const { hc::hcWaitMode waitMode = hc::hcWaitModeActive; if (_scheduleMode == Auto) { if (g_deviceCnt > g_numLogicalThreads) { waitMode = hc::hcWaitModeActive; } else { waitMode = hc::hcWaitModeBlocked; } } else if (_scheduleMode == Spin) { waitMode = hc::hcWaitModeActive; } else if (_scheduleMode == Yield) { waitMode = hc::hcWaitModeBlocked; } else { assert(0); // bad wait mode. } if (HIP_WAIT_MODE == 1) { waitMode = hc::hcWaitModeBlocked; } else if (HIP_WAIT_MODE == 2) { waitMode = hc::hcWaitModeActive; } return waitMode; } // Wait for all kernel and data copy commands in this stream to complete. // This signature should be used in routines that already have locked the stream mutex void ihipStream_t::wait(LockedAccessor_StreamCrit_t& crit) { tprintf(DB_SYNC, "%s wait for queue-empty..\n", ToString(this).c_str()); crit->_av.wait(waitMode()); crit->_kernelCnt = 0; } //--- // Wait for all kernel and data copy commands in this stream to complete. void ihipStream_t::locked_wait() { LockedAccessor_StreamCrit_t crit(_criticalData); wait(crit); }; // Causes current stream to wait for specified event to complete: // Note this does not provide any kind of host serialization. void ihipStream_t::locked_streamWaitEvent(ihipEventData_t& ecd) { LockedAccessor_StreamCrit_t crit(_criticalData); crit->_av.create_blocking_marker(ecd.marker(), hc::accelerator_scope); } // Causes current stream to wait for specified event to complete: // Note this does not provide any kind of host serialization. bool ihipStream_t::locked_eventIsReady(hipEvent_t event) { // Event query that returns "Complete" may cause HCC to manipulate // internal queue state so lock the stream's queue here. 
LockedAccessor_StreamCrit_t scrit(_criticalData); LockedAccessor_EventCrit_t ecrit(event->criticalData()); return (ecrit->_eventData.marker().is_ready()); } // Waiting on event can cause HCC to reclaim stream resources - so need to lock the stream. void ihipStream_t::locked_eventWaitComplete(hc::completion_future& marker, hc::hcWaitMode waitMode) { LockedAccessor_StreamCrit_t crit(_criticalData); marker.wait(waitMode); } // Create a marker in this stream. // Save state in the event so it can track the status of the event. hc::completion_future ihipStream_t::locked_recordEvent(hipEvent_t event) { // Lock the stream to prevent simultaneous access LockedAccessor_StreamCrit_t crit(_criticalData); auto scopeFlag = hc::accelerator_scope; // The env var HIP_EVENT_SYS_RELEASE sets the default, // The explicit flags override the env var (if specified) if (event->_flags & hipEventReleaseToSystem) { scopeFlag = hc::system_scope; } else if (event->_flags & hipEventReleaseToDevice) { scopeFlag = hc::accelerator_scope; } else { scopeFlag = HIP_EVENT_SYS_RELEASE ? hc::system_scope : hc::accelerator_scope; } return crit->_av.create_marker(scopeFlag); }; //============================================================================= //------------------------------------------------------------------------------------------------- //--- const ihipDevice_t* ihipStream_t::getDevice() const { return _ctx->getDevice(); }; ihipCtx_t* ihipStream_t::getCtx() const { return _ctx; }; //-- // Lock the stream to prevent other threads from intervening. LockedAccessor_StreamCrit_t ihipStream_t::lockopen_preKernelCommand() { LockedAccessor_StreamCrit_t crit(_criticalData, false /*no unlock at destruction*/); return crit; } //--- // Must be called after kernel finishes, this releases the lock on the stream so other commands can // submit. void ihipStream_t::lockclose_postKernelCommand(const char* kernelName, hc::accelerator_view* av) { bool blockThisKernel = false; if (!g_hipLaunchBlockingKernels.empty()) { std::string kernelNameString(kernelName); for (auto o = g_hipLaunchBlockingKernels.begin(); o != g_hipLaunchBlockingKernels.end(); o++) { if ((*o == kernelNameString)) { // printf ("force blocking for kernel %s\n", o->c_str()); blockThisKernel = true; } } } if (HIP_LAUNCH_BLOCKING || blockThisKernel) { // TODO - fix this so it goes through proper stream::wait() call.// direct wait OK since we // know the stream is locked. av->wait(hc::hcWaitModeActive); tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for kernel '%s' completion\n", ToString(this).c_str(), kernelName); } _criticalData.unlock(); // paired with lock from lockopen_preKernelCommand. }; //============================================================================= // Recompute the peercnt and the packed _peerAgents whenever a peer is added or deleted. // The packed _peerAgents can efficiently be used on each memory allocation. 
template <> void ihipCtxCriticalBase_t<CtxMutex>::recomputePeerAgents() { _peerCnt = 0; std::for_each(_peers.begin(), _peers.end(), [this](ihipCtx_t* ctx) { _peerAgents[_peerCnt++] = ctx->getDevice()->_hsaAgent; }); } template <> bool ihipCtxCriticalBase_t<CtxMutex>::isPeerWatcher(const ihipCtx_t* peer) { auto match = std::find_if(_peers.begin(), _peers.end(), [=](const ihipCtx_t* d) { return d->getDeviceNum() == peer->getDeviceNum(); }); return (match != std::end(_peers)); } template <> bool ihipCtxCriticalBase_t<CtxMutex>::addPeerWatcher(const ihipCtx_t* thisCtx, ihipCtx_t* peerWatcher) { auto match = std::find(_peers.begin(), _peers.end(), peerWatcher); if (match == std::end(_peers)) { // Not already a peer, let's update the list: tprintf(DB_COPY, "addPeerWatcher. Allocations on %s now visible to peerWatcher %s.\n", thisCtx->toString().c_str(), peerWatcher->toString().c_str()); _peers.push_back(peerWatcher); recomputePeerAgents(); return true; } // If we get here - peer was already on list, silently ignore. return false; } template <> bool ihipCtxCriticalBase_t<CtxMutex>::removePeerWatcher(const ihipCtx_t* thisCtx, ihipCtx_t* peerWatcher) { auto match = std::find(_peers.begin(), _peers.end(), peerWatcher); if (match != std::end(_peers)) { // Found a valid peer, let's remove it. tprintf( DB_COPY, "removePeerWatcher. Allocations on %s no longer visible to former peerWatcher %s.\n", thisCtx->toString().c_str(), peerWatcher->toString().c_str()); _peers.remove(peerWatcher); recomputePeerAgents(); return true; } else { return false; } } template <> void ihipCtxCriticalBase_t<CtxMutex>::resetPeerWatchers(ihipCtx_t* thisCtx) { tprintf(DB_COPY, "resetPeerWatchers for context=%s\n", thisCtx->toString().c_str()); _peers.clear(); _peerCnt = 0; addPeerWatcher(thisCtx, thisCtx); // peer-list always contains self agent. 
} template <> void ihipCtxCriticalBase_t<CtxMutex>::printPeerWatchers(FILE* f) const { for (auto iter = _peers.begin(); iter != _peers.end(); iter++) { fprintf(f, "%s ", (*iter)->toString().c_str()); }; } template <> void ihipCtxCriticalBase_t<CtxMutex>::addStream(ihipStream_t* stream) { stream->_id = _streams.size(); _streams.push_back(stream); tprintf(DB_SYNC, " addStream: %s\n", ToString(stream).c_str()); } template <> void ihipDeviceCriticalBase_t<DeviceMutex>::addContext(ihipCtx_t* ctx) { _ctxs.push_back(ctx); tprintf(DB_SYNC, " addContext: %s\n", ToString(ctx).c_str()); } //============================================================================= //================================================================================================= // ihipDevice_t //================================================================================================= ihipDevice_t::ihipDevice_t(unsigned deviceId, unsigned deviceCnt, hc::accelerator& acc) : _deviceId(deviceId), _acc(acc), _state(0), _criticalData(this) { hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent()); if (agent) { int err = hsa_agent_get_info( *agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &_computeUnits); if (err != HSA_STATUS_SUCCESS) { _computeUnits = 1; } _hsaAgent = *agent; } else { _hsaAgent.handle = static_cast<uint64_t>(-1); } initProperties(&_props); _primaryCtx = new ihipCtx_t(this, deviceCnt, hipDeviceMapHost); } ihipDevice_t::~ihipDevice_t() { delete _primaryCtx; _primaryCtx = NULL; } void ihipDevice_t::locked_removeContext(ihipCtx_t* c) { LockedAccessor_DeviceCrit_t crit(_criticalData); crit->ctxs().remove(c); tprintf(DB_SYNC, " locked_removeContext: %s\n", ToString(c).c_str()); } void ihipDevice_t::locked_reset() { // Obtain mutex access to the device critical data, release by destructor LockedAccessor_DeviceCrit_t crit(_criticalData); //--- // Wait for pending activity to complete? TODO - check if this is required behavior: tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n"); // Reset and remove streams: // Delete all created streams including the default one. for (auto ctxI = crit->const_ctxs().begin(); ctxI != crit->const_ctxs().end(); ctxI++) { ihipCtx_t* ctx = *ctxI; (*ctxI)->locked_reset(); tprintf(DB_SYNC, " ctx cleanup %s\n", ToString(ctx).c_str()); delete ctx; } // Clear the list. crit->ctxs().clear(); // reset _primaryCtx _primaryCtx->locked_reset(); tprintf(DB_SYNC, " _primaryCtx cleanup %s\n", ToString(_primaryCtx).c_str()); // Reset and release all memory stored in the tracker: // Reset will remove peer mapping so don't need to do this explicitly. // FIXME - This is clearly a non-const action! Is this a context reset or a device reset - // maybe should reference count? _state = 0; am_memtracker_reset(_acc); // FIXME - Calling am_memtracker_reset is really bad since it destroyed all buffers allocated by // the HCC runtime as well such as the printf buffer. Re-initialze the printf buffer as a // workaround for now. #ifdef HC_FEATURE_PRINTF Kalmar::getContext()->initPrintfBuffer(); #endif }; #define ErrorCheck(x) error_check(x, __LINE__, __FILE__) void error_check(hsa_status_t hsa_error_code, int line_num, std::string str) { if ((hsa_error_code != HSA_STATUS_SUCCESS) && (hsa_error_code != HSA_STATUS_INFO_BREAK)) { printf("HSA reported error!\n In file: %s\nAt line: %d\n", str.c_str(), line_num); } } //--- // Helper for initProperties // Determines if the given agent is of type HSA_DEVICE_TYPE_GPU and counts it. 
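// Typical usage, as seen in initProperties further below (sketch for orientation only,
// error handling omitted):
//   int gpuAgentsCount = 0;
//   hsa_iterate_agents(countGpuAgents, &gpuAgentsCount);
// hsa_iterate_agents invokes the callback once per agent and the GPU count accumulates
// into the int passed through the void* data argument.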
static hsa_status_t countGpuAgents(hsa_agent_t agent, void* data) { if (data == NULL) { return HSA_STATUS_ERROR_INVALID_ARGUMENT; } hsa_device_type_t device_type; hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type); if (status != HSA_STATUS_SUCCESS) { return status; } if (device_type == HSA_DEVICE_TYPE_GPU) { (*static_cast<int*>(data))++; } return HSA_STATUS_SUCCESS; } hsa_status_t FindGpuDevice(hsa_agent_t agent, void* data) { if (data == NULL) { return HSA_STATUS_ERROR_INVALID_ARGUMENT; } hsa_device_type_t hsa_device_type; hsa_status_t hsa_error_code = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &hsa_device_type); if (hsa_error_code != HSA_STATUS_SUCCESS) { return hsa_error_code; } if (hsa_device_type == HSA_DEVICE_TYPE_GPU) { *((hsa_agent_t*)data) = agent; return HSA_STATUS_INFO_BREAK; } return HSA_STATUS_SUCCESS; } hsa_status_t GetDevicePool(hsa_amd_memory_pool_t pool, void* data) { if (NULL == data) { return HSA_STATUS_ERROR_INVALID_ARGUMENT; } hsa_status_t err; hsa_amd_segment_t segment; uint32_t flag; err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &segment); ErrorCheck(err); if (HSA_AMD_SEGMENT_GLOBAL != segment) return HSA_STATUS_SUCCESS; err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &flag); ErrorCheck(err); *((hsa_amd_memory_pool_t*)data) = pool; return HSA_STATUS_SUCCESS; } int checkAccess(hsa_agent_t agent, hsa_amd_memory_pool_t pool) { hsa_status_t err; hsa_amd_memory_pool_access_t access; err = hsa_amd_agent_memory_pool_get_info(agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access); ErrorCheck(err); return access; } hsa_status_t get_pool_info(hsa_amd_memory_pool_t pool, void* data) { hsa_status_t err; hipDeviceProp_t* p_prop = reinterpret_cast<hipDeviceProp_t*>(data); uint32_t region_segment; // Get pool segment err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &region_segment); ErrorCheck(err); switch (region_segment) { case HSA_REGION_SEGMENT_READONLY: err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE, &(p_prop->totalConstMem)); break; case HSA_REGION_SEGMENT_GROUP: err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE, &(p_prop->sharedMemPerBlock)); break; default: break; } return err; } // Determines if the given agent is of type HSA_DEVICE_TYPE_GPU and counts it. static hsa_status_t findCpuAgent(hsa_agent_t agent, void* data) { hsa_device_type_t device_type; hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type); if (status != HSA_STATUS_SUCCESS) { return status; } if (device_type == HSA_DEVICE_TYPE_CPU) { (*static_cast<hsa_agent_t*>(data)) = agent; return HSA_STATUS_INFO_BREAK; } return HSA_STATUS_SUCCESS; } #define DeviceErrorCheck(x) \ if (x != HSA_STATUS_SUCCESS) { \ return hipErrorInvalidDevice; \ } //--- // Initialize properties for the device. // Call this once when the ihipDevice_t is created: hipError_t ihipDevice_t::initProperties(hipDeviceProp_t* prop) { hipError_t e = hipSuccess; hsa_status_t err; memset(prop, 0, sizeof(hipDeviceProp_t)); if (_hsaAgent.handle == -1) { return hipErrorInvalidDevice; } // Iterates over the agents to determine Multiple GPU devices // using the countGpuAgents callback. //! @bug : on HCC, isMultiGpuBoard returns True if system contains multiple GPUS (rather than if //! 
GPU is on a multi-ASIC board) int gpuAgentsCount = 0; err = hsa_iterate_agents(countGpuAgents, &gpuAgentsCount); if (err == HSA_STATUS_INFO_BREAK) { err = HSA_STATUS_SUCCESS; } DeviceErrorCheck(err); prop->isMultiGpuBoard = (gpuAgentsCount < 2) ? 0 : 1; // Get agent name err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_PRODUCT_NAME, &(prop->name)); DeviceErrorCheck(err); char archName[256]; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NAME, &archName); prop->gcnArch = atoi(archName + 3); DeviceErrorCheck(err); // Get agent node uint32_t node; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NODE, &node); DeviceErrorCheck(err); // Get wavefront size err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &prop->warpSize); DeviceErrorCheck(err); // Get max total number of work-items in a workgroup err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_SIZE, &prop->maxThreadsPerBlock); DeviceErrorCheck(err); // Get max number of work-items of each dimension of a work-group uint16_t work_group_max_dim[3]; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM, work_group_max_dim); DeviceErrorCheck(err); for (int i = 0; i < 3; i++) { prop->maxThreadsDim[i] = work_group_max_dim[i]; } hsa_dim3_t grid_max_dim; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim); DeviceErrorCheck(err); prop->maxGridSize[0] = (int)((grid_max_dim.x == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.x); prop->maxGridSize[1] = (int)((grid_max_dim.y == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.y); prop->maxGridSize[2] = (int)((grid_max_dim.z == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.z); // Get Max clock frequency err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_CLOCK_FREQUENCY, &prop->clockRate); prop->clockRate *= 1000.0; // convert MHz to kHz. DeviceErrorCheck(err); uint64_t counterHz; err = hsa_system_get_info(HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, &counterHz); DeviceErrorCheck(err); prop->clockInstructionRate = counterHz / 1000; // Get Agent BDFID (bus/device/function ID) uint16_t bdf_id = 1; err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_BDFID, &bdf_id); DeviceErrorCheck(err); // BDFID is 16bit uint: [8bit - BusID | 5bit - Device ID | 3bit - Function/DomainID] prop->pciDomainID = bdf_id & 0x7; prop->pciDeviceID = (bdf_id >> 3) & 0x1F; prop->pciBusID = (bdf_id >> 8) & 0xFF; // Masquerade as a 3.0-level device. This will change as more HW functions are properly // supported. Application code should use the arch.has* to do detailed feature detection. prop->major = 3; prop->minor = 0; // Get number of Compute Units err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &(prop->multiProcessorCount)); DeviceErrorCheck(err); // TODO-hsart - this appears to return 0?
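// HSA_AGENT_INFO_CACHE_SIZE reports data-cache sizes in bytes for levels L1..L4;
// index 1 is used below as the L2 size (the TODO above notes it may report 0 on some stacks).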
uint32_t cache_size[4]; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_CACHE_SIZE, cache_size); DeviceErrorCheck(err); prop->l2CacheSize = cache_size[1]; /* Computemode for HSA Devices is always : cudaComputeModeDefault */ prop->computeMode = 0; _isLargeBar = _acc.has_cpu_accessible_am(); // Get Max Threads Per Multiprocessor uint32_t max_waves_per_cu; err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_WAVES_PER_CU, &max_waves_per_cu); DeviceErrorCheck(err); prop->maxThreadsPerMultiProcessor = prop->warpSize * max_waves_per_cu; // Get memory properties err = hsa_amd_agent_iterate_memory_pools(_hsaAgent, get_pool_info, prop); if (err == HSA_STATUS_INFO_BREAK) { err = HSA_STATUS_SUCCESS; } DeviceErrorCheck(err); // Get the size of the pool we are using for Accelerator Memory allocations: hsa_region_t* am_region = static_cast<hsa_region_t*>(_acc.get_hsa_am_region()); err = hsa_region_get_info(*am_region, HSA_REGION_INFO_SIZE, &prop->totalGlobalMem); DeviceErrorCheck(err); // maxSharedMemoryPerMultiProcessor should be as the same as group memory size. // Group memory will not be paged out, so, the physical memory size is the total shared memory // size, and also equal to the group pool size. prop->maxSharedMemoryPerMultiProcessor = prop->totalGlobalMem; // Get Max memory clock frequency err = hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_MAX_CLOCK_FREQUENCY, &prop->memoryClockRate); DeviceErrorCheck(err); prop->memoryClockRate *= 1000.0; // convert Mhz to Khz. // Get global memory bus width in bits err = hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_BUS_WIDTH, &prop->memoryBusWidth); DeviceErrorCheck(err); // Set feature flags - these are all mandatory for HIP on HCC path: // Some features are under-development and future revs may support flags that are currently 0. // Reporting of these flags should be synchronized with the HIP_ARCH* compile-time defines in // hip_runtime.h prop->arch.hasGlobalInt32Atomics = 1; prop->arch.hasGlobalFloatAtomicExch = 1; prop->arch.hasSharedInt32Atomics = 1; prop->arch.hasSharedFloatAtomicExch = 1; prop->arch.hasFloatAtomicAdd = 1; // supported with CAS loop, but is supported prop->arch.hasGlobalInt64Atomics = 1; prop->arch.hasSharedInt64Atomics = 1; prop->arch.hasDoubles = 1; prop->arch.hasWarpVote = 1; prop->arch.hasWarpBallot = 1; prop->arch.hasWarpShuffle = 1; prop->arch.hasFunnelShift = 0; // TODO-hcc prop->arch.hasThreadFenceSystem = 1; prop->arch.hasSyncThreadsExt = 0; // TODO-hcc prop->arch.hasSurfaceFuncs = 0; // TODO-hcc prop->arch.has3dGrid = 1; prop->arch.hasDynamicParallelism = 0; prop->concurrentKernels = 1; // All ROCm hardware supports executing multiple kernels concurrently prop->canMapHostMemory = 1; // All ROCm devices can map host memory prop->totalConstMem = 16384; #if 0 // TODO - code broken below since it always returns 1. // Are the flags part of the context or part of the device? 
if ( _device_flags | hipDeviceMapHost) { prop->canMapHostMemory = 1; } else { prop->canMapHostMemory = 0; } #endif // Get profile hsa_profile_t agent_profile; err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_PROFILE, &agent_profile); DeviceErrorCheck(err); if(agent_profile == HSA_PROFILE_FULL) { prop->integrated = 1; } return e; } //================================================================================================= // ihipCtx_t //================================================================================================= ihipCtx_t::ihipCtx_t(ihipDevice_t* device, unsigned deviceCnt, unsigned flags) : _ctxFlags(flags), _device(device), _criticalData(this, deviceCnt) { // locked_reset(); LockedAccessor_CtxCrit_t crit(_criticalData); _defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault); crit->addStream(_defaultStream); // Reset peer list to just me: crit->resetPeerWatchers(this); tprintf(DB_SYNC, "created ctx with defaultStream=%p (%s)\n", _defaultStream, ToString(_defaultStream).c_str()); }; ihipCtx_t::~ihipCtx_t() { if (_defaultStream) { delete _defaultStream; _defaultStream = NULL; } } // Reset the device - this is called from hipDeviceReset. // Device may be reset multiple times, and may be reset after init. void ihipCtx_t::locked_reset() { // Obtain mutex access to the device critical data, release by destructor LockedAccessor_CtxCrit_t crit(_criticalData); //--- // Wait for pending activity to complete? TODO - check if this is required behavior: tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n"); // Reset and remove streams: // Delete all created streams including the default one. for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end(); streamI++) { ihipStream_t* stream = *streamI; (*streamI)->locked_wait(); tprintf(DB_SYNC, " delete %s\n", ToString(stream).c_str()); delete stream; } // Clear the list. crit->streams().clear(); // Create a fresh default stream and add it: _defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault); crit->addStream(_defaultStream); #if 0 // Reset peer list to just me: crit->resetPeerWatchers(this); // Reset and release all memory stored in the tracker: // Reset will remove peer mapping so don't need to do this explicitly. // FIXME - This is clearly a non-const action! Is this a context reset or a device reset - maybe should reference count? ihipDevice_t *device = getWriteableDevice(); device->_state = 0; am_memtracker_reset(device->_acc); #endif }; //--- std::string ihipCtx_t::toString() const { std::ostringstream ss; ss << this; return ss.str(); }; //---- //================================================================================================= // Utility functions, these are not part of the public HIP API //================================================================================================= //================================================================================================= // This called for submissions that are sent to the null/default stream. This routine ensures // that this new command waits for activity in the other streams to complete before proceeding. // // HIP_SYNC_NULL_STREAM=0 does all dependency resolutiokn on the GPU // HIP_SYNC_NULL_STREAM=1 s legacy non-optimal mode which conservatively waits on host. // // If waitOnSelf is set, this additionally waits for the default stream to empty. 
// In new HIP_SYNC_NULL_STREAM=0 mode, this enqueues a marker which causes the default stream to // wait for other activity, but doesn't actually block the host. If host blocking is desired, the // caller should set syncHost. // // syncToHost causes host to wait for the stream to finish. // Note HIP_SYNC_NULL_STREAM=1 path always sync to Host. void ihipCtx_t::locked_syncDefaultStream(bool waitOnSelf, bool syncHost) { LockedAccessor_CtxCrit_t crit(_criticalData); tprintf(DB_SYNC, "syncDefaultStream \n"); // Vector of ops sent to each stream that will complete before ops sent to null stream: std::vector<hc::completion_future> depOps; for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end(); streamI++) { ihipStream_t* stream = *streamI; // Don't wait for streams that have "opted-out" of syncing with NULL stream. // And - don't wait for the NULL stream, unless waitOnSelf specified. bool waitThisStream = (!(stream->_flags & hipStreamNonBlocking)) && (waitOnSelf || (stream != _defaultStream)); if (HIP_SYNC_NULL_STREAM) { if (waitThisStream) { stream->locked_wait(); } } else { if (waitThisStream) { LockedAccessor_StreamCrit_t streamCrit(stream->_criticalData); // The last marker will provide appropriate visibility: if (!streamCrit->_av.get_is_empty()) { depOps.push_back(streamCrit->_av.create_marker(hc::accelerator_scope)); tprintf(DB_SYNC, " push marker to wait for stream=%s\n", ToString(stream).c_str()); } else { tprintf(DB_SYNC, " skipped stream=%s since it is empty\n", ToString(stream).c_str()); } } } } // Enqueue a barrier to wait on all the barriers we sent above: if (!HIP_SYNC_NULL_STREAM && !depOps.empty()) { LockedAccessor_StreamCrit_t defaultStreamCrit(_defaultStream->_criticalData); tprintf(DB_SYNC, " null-stream wait on %zu non-empty streams. sync_host=%d\n", depOps.size(), syncHost); hc::completion_future defaultCf = defaultStreamCrit->_av.create_blocking_marker( depOps.begin(), depOps.end(), hc::accelerator_scope); if (syncHost) { defaultCf.wait(); // TODO - account for active or blocking here. } } tprintf(DB_SYNC, " syncDefaultStream depOps=%zu\n", depOps.size()); } //--- void ihipCtx_t::locked_removeStream(ihipStream_t* s) { LockedAccessor_CtxCrit_t crit(_criticalData); crit->streams().remove(s); } //--- // Heavyweight synchronization that waits on all streams, ignoring hipStreamNonBlocking flag. 
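// Contrast with locked_syncDefaultStream above: that routine makes the null stream wait on the
// other blocking streams (optionally blocking the host), while this one host-blocks on every
// stream in the context, including hipStreamNonBlocking streams. Hypothetical caller sketch
// (for orientation only):
//   ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
//   ctx->locked_waitAllStreams();   // the kind of wait a device-wide synchronize needs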
void ihipCtx_t::locked_waitAllStreams() { LockedAccessor_CtxCrit_t crit(_criticalData); tprintf(DB_SYNC, "waitAllStream\n"); for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end(); streamI++) { (*streamI)->locked_wait(); } } std::string HIP_DB_string(unsigned db) { std::string dbStr; bool first = true; for (int i = 0; i < DB_MAX_FLAG; i++) { if (db & (1 << i)) { if (!first) { dbStr += "+"; }; dbStr += dbName[i]._color; dbStr += dbName[i]._shortName; dbStr += KNRM; first = false; }; } return dbStr; } // Callback used to process HIP_DB input, supports either // integer or flag names separated by + std::string HIP_DB_callback(void* var_ptr, const char* envVarString) { int* var_ptr_int = static_cast<int*>(var_ptr); std::string e(envVarString); trim(&e); if (!e.empty() && isdigit(e.c_str()[0])) { long int v = strtol(envVarString, NULL, 0); *var_ptr_int = (int)(v); } else { *var_ptr_int = 0; std::vector<std::string> tokens; tokenize(e, '+', &tokens); for (auto t = tokens.begin(); t != tokens.end(); t++) { for (int i = 0; i < DB_MAX_FLAG; i++) { if (!strcmp(t->c_str(), dbName[i]._shortName)) { *var_ptr_int |= (1 << i); } // TODO - else throw error? } } } return HIP_DB_string(*var_ptr_int); ; } // Callback used to process list of visible devices. std::string HIP_VISIBLE_DEVICES_callback(void* var_ptr, const char* envVarString) { // Parse the string stream of env and store the device ids to g_hip_visible_devices global // variable std::string str = envVarString; std::istringstream ss(str); std::string device_id; // Clean up the defult value g_hip_visible_devices.clear(); g_visible_device = true; // Read the visible device numbers while (std::getline(ss, device_id, ',')) { if (atoi(device_id.c_str()) >= 0) { g_hip_visible_devices.push_back(atoi(device_id.c_str())); } else { // Any device number after invalid number will not present break; } } std::string valueString; // Print out the number of ids for (int i = 0; i < g_hip_visible_devices.size(); i++) { valueString += std::to_string((g_hip_visible_devices[i])); valueString += ' '; } return valueString; } // TODO - change last arg to pointer. void parseTrigger(std::string triggerString, std::vector<ProfTrigger>& profTriggers) { std::vector<std::string> tidApiTokens; tokenize(std::string(triggerString), ',', &tidApiTokens); for (auto t = tidApiTokens.begin(); t != tidApiTokens.end(); t++) { std::vector<std::string> oneToken; // std::cout << "token=" << *t << "\n"; tokenize(std::string(*t), '.', &oneToken); int tid = 1; uint64_t apiTrigger = 0; if (oneToken.size() == 1) { // the case with just apiNum apiTrigger = std::strtoull(oneToken[0].c_str(), nullptr, 0); } else if (oneToken.size() == 2) { // the case with tid.apiNum tid = std::strtoul(oneToken[0].c_str(), nullptr, 0); apiTrigger = std::strtoull(oneToken[1].c_str(), nullptr, 0); } else { throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var? } if (tid > 10000) { throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var? 
} else { profTriggers.resize(tid + 1); // std::cout << "tid:" << tid << " add: " << apiTrigger << "\n"; profTriggers[tid].add(apiTrigger); } } for (int tid = 1; tid < profTriggers.size(); tid++) { profTriggers[tid].sort(); profTriggers[tid].print(tid); } } void HipReadEnv() { /* * Environment variables */ g_hip_visible_devices.push_back(0); /* Set the default value of visible devices */ READ_ENV_I(release, HIP_PRINT_ENV, 0, "Print HIP environment variables."); //-- READ HIP_PRINT_ENV env first, since it has impact on later env var reading // TODO: In HIP/hcc, this variable blocks after both kernel commmands and data transfer. Maybe // should be bit-mask for each command type? READ_ENV_I(release, HIP_LAUNCH_BLOCKING, CUDA_LAUNCH_BLOCKING, "Make HIP kernel launches 'host-synchronous', so they block until any kernel " "launches. Alias: CUDA_LAUNCH_BLOCKING."); READ_ENV_S(release, HIP_LAUNCH_BLOCKING_KERNELS, 0, "Comma-separated list of kernel names to make host-synchronous, so they block until " "completed."); if (!HIP_LAUNCH_BLOCKING_KERNELS.empty()) { tokenize(HIP_LAUNCH_BLOCKING_KERNELS, ',', &g_hipLaunchBlockingKernels); } READ_ENV_I(release, HIP_API_BLOCKING, 0, "Make HIP APIs 'host-synchronous', so they block until completed. Impacts " "hipMemcpyAsync, hipMemsetAsync."); READ_ENV_I(release, HIP_HIDDEN_FREE_MEM, 0, "Amount of memory to hide from the free memory reported by hipMemGetInfo, specified " "in MB. Impacts hipMemGetInfo."); READ_ENV_C(release, HIP_DB, 0, "Print debug info. Bitmask (HIP_DB=0xff) or flags separated by '+' " "(HIP_DB=api+sync+mem+copy+fatbin)", HIP_DB_callback); if ((HIP_DB & (1 << DB_API)) && (HIP_TRACE_API == 0)) { // Set HIP_TRACE_API default before we read it, so it is printed correctly. HIP_TRACE_API = 1; } READ_ENV_I(release, HIP_TRACE_API, 0, "Trace each HIP API call. Print function name and return code to stderr as program " "executes."); READ_ENV_S(release, HIP_TRACE_API_COLOR, 0, "Color to use for HIP_API. None/Red/Green/Yellow/Blue/Magenta/Cyan/White"); READ_ENV_I(release, HIP_PROFILE_API, 0, "Add HIP API markers to ATP file generated with CodeXL. 0x1=short API name, " "0x2=full API name including args."); READ_ENV_S(release, HIP_DB_START_API, 0, "Comma-separated list of tid.api_seq_num for when to start debug and profiling."); READ_ENV_S(release, HIP_DB_STOP_API, 0, "Comma-separated list of tid.api_seq_num for when to stop debug and profiling."); READ_ENV_C(release, HIP_VISIBLE_DEVICES, CUDA_VISIBLE_DEVICES, "Only devices whose index is present in the sequence are visible to HIP " "applications and they are enumerated in the order of sequence.", HIP_VISIBLE_DEVICES_callback); READ_ENV_I(release, HIP_WAIT_MODE, 0, "Force synchronization mode. 1= force yield, 2=force spin, 0=defaults specified in " "application"); READ_ENV_I(release, HIP_FORCE_P2P_HOST, 0, "Force use of host/staging copy for peer-to-peer copies.1=always use copies, " "2=always return false for hipDeviceCanAccessPeer"); READ_ENV_I(release, HIP_FORCE_SYNC_COPY, 0, "Force all copies (even hipMemcpyAsync) to use sync copies"); READ_ENV_I(release, HIP_FAIL_SOC, 0, "Fault on Sub-Optimal-Copy, rather than use a slower but functional implementation. " " Bit 0x1=Fail on async copy with unpinned memory. Bit 0x2=Fail peer copy rather " "than use staging buffer copy"); READ_ENV_I(release, HIP_SYNC_HOST_ALLOC, 0, "Sync before and after all host memory allocations. 
May help stability"); READ_ENV_I(release, HIP_INIT_ALLOC, 0, "If not -1, initialize allocated memory to specified byte"); READ_ENV_I(release, HIP_SYNC_NULL_STREAM, 0, "Synchronize on host for null stream submissions"); READ_ENV_I(release, HIP_FORCE_NULL_STREAM, 0, "Force all stream allocations to secretly return the null stream"); READ_ENV_I(release, HIP_SYNC_STREAM_WAIT, 0, "hipStreamWaitEvent will synchronize to host"); READ_ENV_I(release, HIP_HOST_COHERENT, 0, "If set, all host memory will be allocated as fine-grained system memory. This " "allows threadfence_system to work but prevents host memory from being cached on " "GPU which may have performance impact."); READ_ENV_I(release, HCC_OPT_FLUSH, 0, "When set, use agent-scope fence operations rather than system-scope fence " "operationsflush when possible. This flag controls both HIP and HCC behavior."); READ_ENV_I(release, HIP_EVENT_SYS_RELEASE, 0, "If set, event are created with hipEventReleaseToSystem by default. If 0, events " "are created with hipEventReleaseToDevice by default. The defaults can be " "overridden by specifying hipEventReleaseToSystem or hipEventReleaseToDevice flag " "when creating the event."); READ_ENV_I(release, HIP_DUMP_CODE_OBJECT, 0, "If set, dump code object as __hip_dump_code_object[nnnn].o in the current directory," "where nnnn is the index number."); // Some flags have both compile-time and runtime flags - generate a warning if user enables the // runtime flag but the compile-time flag is disabled. if (HIP_DB && !COMPILE_HIP_DB) { fprintf(stderr, "warning: env var HIP_DB=0x%x but COMPILE_HIP_DB=0. (perhaps enable " "COMPILE_HIP_DB in src code before compiling?)\n", HIP_DB); } if (HIP_TRACE_API && !COMPILE_HIP_TRACE_API) { fprintf(stderr, "warning: env var HIP_TRACE_API=0x%x but COMPILE_HIP_TRACE_API=0. (perhaps enable " "COMPILE_HIP_TRACE_API in src code before compiling?)\n", HIP_DB); } if (HIP_TRACE_API) { HIP_DB |= 0x1; } if (HIP_PROFILE_API && !COMPILE_HIP_ATP_MARKER) { fprintf(stderr, "warning: env var HIP_PROFILE_API=0x%x but COMPILE_HIP_ATP_MARKER=0. (perhaps " "enable COMPILE_HIP_ATP_MARKER in src code before compiling?)\n", HIP_PROFILE_API); HIP_PROFILE_API = 0; } if (HIP_DB) { fprintf(stderr, "HIP_DB=0x%x [%s]\n", HIP_DB, HIP_DB_string(HIP_DB).c_str()); } std::transform(HIP_TRACE_API_COLOR.begin(), HIP_TRACE_API_COLOR.end(), HIP_TRACE_API_COLOR.begin(), ::tolower); if (HIP_TRACE_API_COLOR == "none") { API_COLOR = ""; API_COLOR_END = ""; } else if (HIP_TRACE_API_COLOR == "red") { API_COLOR = KRED; } else if (HIP_TRACE_API_COLOR == "green") { API_COLOR = KGRN; } else if (HIP_TRACE_API_COLOR == "yellow") { API_COLOR = KYEL; } else if (HIP_TRACE_API_COLOR == "blue") { API_COLOR = KBLU; } else if (HIP_TRACE_API_COLOR == "magenta") { API_COLOR = KMAG; } else if (HIP_TRACE_API_COLOR == "cyan") { API_COLOR = KCYN; } else if (HIP_TRACE_API_COLOR == "white") { API_COLOR = KWHT; } else { fprintf(stderr, "warning: env var HIP_TRACE_API_COLOR=%s must be " "None/Red/Green/Yellow/Blue/Magenta/Cyan/White", HIP_TRACE_API_COLOR.c_str()); }; parseTrigger(HIP_DB_START_API, g_dbStartTriggers); parseTrigger(HIP_DB_STOP_API, g_dbStopTriggers); }; //--- // Function called one-time at initialization time to construct a table of all GPU devices. // HIP/CUDA uses integer "deviceIds" - these are indexes into this table. // AMP maintains a table of accelerators, but some are emulated - ie for debug or CPU. // This function creates a vector with only the GPU accelerators. 
// It is called with C++11 call_once, which provided thread-safety. void ihipInit() { #if COMPILE_HIP_ATP_MARKER amdtInitializeActivityLogger(); amdtScopedMarker("ihipInit", "HIP", NULL); #endif HipReadEnv(); /* * Build a table of valid compute devices. */ auto accs = hc::accelerator::get_all(); int deviceCnt = 0; for (int i = 0; i < accs.size(); i++) { if (!accs[i].get_is_emulated()) { deviceCnt++; } }; // Make sure the hip visible devices are within the deviceCnt range for (int i = 0; i < g_hip_visible_devices.size(); i++) { if (g_hip_visible_devices[i] >= deviceCnt) { // Make sure any DeviceID after invalid DeviceID will be erased. g_hip_visible_devices.resize(i); break; } } hsa_status_t err = hsa_iterate_agents(findCpuAgent, &g_cpu_agent); if (err != HSA_STATUS_INFO_BREAK) { // didn't find a CPU. throw ihipException(hipErrorRuntimeOther); } g_deviceArray = new ihipDevice_t*[deviceCnt]; g_deviceCnt = 0; for (int i = 0; i < accs.size(); i++) { // check if the device id is included in the HIP_VISIBLE_DEVICES env variable if (!accs[i].get_is_emulated()) { if (std::find(g_hip_visible_devices.begin(), g_hip_visible_devices.end(), (i - 1)) == g_hip_visible_devices.end() && g_visible_device) { // If device is not in visible devices list, ignore continue; } g_deviceArray[g_deviceCnt] = new ihipDevice_t(g_deviceCnt, deviceCnt, accs[i]); g_deviceCnt++; } } g_allAgents = static_cast<hsa_agent_t*>(malloc((g_deviceCnt + 1) * sizeof(hsa_agent_t))); g_allAgents[0] = g_cpu_agent; for (int i = 0; i < g_deviceCnt; i++) { g_allAgents[i + 1] = g_deviceArray[i]->_hsaAgent; } g_numLogicalThreads = std::thread::hardware_concurrency(); // If HIP_VISIBLE_DEVICES is not set, make sure all devices are initialized if (!g_visible_device) { assert(deviceCnt == g_deviceCnt); } tprintf(DB_SYNC, "pid=%u %-30s g_numLogicalThreads=%u\n", getpid(), "<ihipInit>", g_numLogicalThreads); } namespace hip_impl { hipError_t hip_init() { static std::once_flag hip_initialized; std::call_once(hip_initialized, ihipInit); ihipCtxStackUpdate(); return hipSuccess; } } hipError_t ihipStreamSynchronize(hipStream_t stream) { hipError_t e = hipSuccess; if (stream == hipStreamNull) { ihipCtx_t* ctx = ihipGetTlsDefaultCtx(); ctx->locked_syncDefaultStream(true /*waitOnSelf*/, true /*syncToHost*/); } else { // note this does not synchornize with the NULL stream: stream->locked_wait(); e = hipSuccess; } return e; } void ihipStreamCallbackHandler(ihipStreamCallback_t* cb) { hipError_t e = hipSuccess; // Synchronize stream tprintf(DB_SYNC, "ihipStreamCallbackHandler wait on stream %s\n", ToString(cb->_stream).c_str()); e = ihipStreamSynchronize(cb->_stream); // Call registered callback function cb->_callback(cb->_stream, e, cb->_userData); delete cb; } //--- // Get the stream to use for a command submission. // // If stream==NULL synchronize appropriately with other streams and return the default av for the // device. If stream is valid, return the AV to use. 
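// Rough control flow of the function below, as a comment-only summary:
//   stream == hipStreamNull : order the default stream behind all other blocking streams
//                             (locked_syncDefaultStream) and return ctx->_defaultStream;
//   otherwise               : unless the stream was created hipStreamNonBlocking, make it wait
//                             on pending null-stream work (a host wait when
//                             HIP_SYNC_NULL_STREAM=1, otherwise a GPU-side marker), then
//                             return the stream unchanged.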
hipStream_t ihipSyncAndResolveStream(hipStream_t stream) { if (stream == hipStreamNull) { // Submitting to NULL stream, call locked_syncDefaultStream to wait for all other streams: ihipCtx_t* ctx = ihipGetTlsDefaultCtx(); tprintf(DB_SYNC, "ihipSyncAndResolveStream %s wait on default stream\n", ToString(stream).c_str()); #ifndef HIP_API_PER_THREAD_DEFAULT_STREAM ctx->locked_syncDefaultStream(false, false); #endif return ctx->_defaultStream; } else { // Submitting to a "normal" stream, just wait for null stream: if (!(stream->_flags & hipStreamNonBlocking)) { if (HIP_SYNC_NULL_STREAM) { tprintf(DB_SYNC, "ihipSyncAndResolveStream %s host-wait on default stream\n", ToString(stream).c_str()); stream->getCtx()->_defaultStream->locked_wait(); } else { ihipStream_t* defaultStream = stream->getCtx()->_defaultStream; bool needGatherMarker = false; // used to gather together other markers. hc::completion_future dcf; { LockedAccessor_StreamCrit_t defaultStreamCrit(defaultStream->criticalData()); // TODO - could call create_blocking_marker(queue) or uses existing marker. if (!defaultStreamCrit->_av.get_is_empty()) { needGatherMarker = true; tprintf(DB_SYNC, " %s adding marker to default %s for dependency\n", ToString(stream).c_str(), ToString(defaultStream).c_str()); dcf = defaultStreamCrit->_av.create_marker(hc::accelerator_scope); } else { tprintf(DB_SYNC, " %s skipping marker since default stream is empty\n", ToString(stream).c_str()); } } if (needGatherMarker) { // ensure any commands sent to this stream wait on the NULL stream before // continuing LockedAccessor_StreamCrit_t thisStreamCrit(stream->criticalData()); // TODO - could be "noret" version of create_blocking_marker thisStreamCrit->_av.create_blocking_marker(dcf, hc::accelerator_scope); tprintf( DB_SYNC, " %s adding marker to wait for freshly recorded default-stream marker \n", ToString(stream).c_str()); } } } return stream; } } void ihipPrintKernelLaunch(const char* kernelName, const grid_launch_parm* lp, const hipStream_t stream) { if ((HIP_TRACE_API & (1 << TRACE_KCMD)) || HIP_PROFILE_API || (COMPILE_HIP_DB & HIP_TRACE_API)) { std::stringstream os; os << tls_tidInfo.pid() << " " << tls_tidInfo.tid() << "." << tls_tidInfo.apiSeqNum() << " hipLaunchKernel '" << kernelName << "'" << " gridDim:" << lp->grid_dim << " groupDim:" << lp->group_dim << " sharedMem:+" << lp->dynamic_group_mem_bytes << " " << *stream; if (COMPILE_HIP_DB && HIP_TRACE_API) { std::string fullStr; recordApiTrace(&fullStr, os.str()); } if (HIP_PROFILE_API == 0x1) { std::string shortAtpString("hipLaunchKernel:"); shortAtpString += kernelName; MARKER_BEGIN(shortAtpString.c_str(), "HIP"); } else if (HIP_PROFILE_API == 0x2) { MARKER_BEGIN(os.str().c_str(), "HIP"); } } } // Called just before a kernel is launched from hipLaunchKernel. // Allows runtime to track some information about the stream. 
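// Expected pairing, sketched as a hypothetical caller (the real call sites are in the
// kernel-launch dispatch code behind hipLaunchKernel, not shown here):
//   grid_launch_parm lp;
//   stream = ihipPreLaunchKernel(stream, gridDim, blockDim, &lp, "myKernel");
//   // ... enqueue the kernel on lp.av ...
//   ihipPostLaunchKernel("myKernel", stream, lp);
// Pre locks the stream via lockopen_preKernelCommand and Post unlocks it via
// lockclose_postKernelCommand, so the two calls must always be balanced.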
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, dim3 block, grid_launch_parm* lp, const char* kernelNameStr) { stream = ihipSyncAndResolveStream(stream); lp->grid_dim.x = grid.x; lp->grid_dim.y = grid.y; lp->grid_dim.z = grid.z; lp->group_dim.x = block.x; lp->group_dim.y = block.y; lp->group_dim.z = block.z; lp->barrier_bit = barrier_bit_queue_default; lp->launch_fence = -1; auto crit = stream->lockopen_preKernelCommand(); lp->av = &(crit->_av); lp->cf = nullptr; ihipPrintKernelLaunch(kernelNameStr, lp, stream); return (stream); } hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, dim3 block, grid_launch_parm* lp, const char* kernelNameStr) { stream = ihipSyncAndResolveStream(stream); lp->grid_dim.x = grid; lp->grid_dim.y = 1; lp->grid_dim.z = 1; lp->group_dim.x = block.x; lp->group_dim.y = block.y; lp->group_dim.z = block.z; lp->barrier_bit = barrier_bit_queue_default; lp->launch_fence = -1; auto crit = stream->lockopen_preKernelCommand(); lp->av = &(crit->_av); lp->cf = nullptr; ihipPrintKernelLaunch(kernelNameStr, lp, stream); return (stream); } hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, size_t block, grid_launch_parm* lp, const char* kernelNameStr) { stream = ihipSyncAndResolveStream(stream); lp->grid_dim.x = grid.x; lp->grid_dim.y = grid.y; lp->grid_dim.z = grid.z; lp->group_dim.x = block; lp->group_dim.y = 1; lp->group_dim.z = 1; lp->barrier_bit = barrier_bit_queue_default; lp->launch_fence = -1; auto crit = stream->lockopen_preKernelCommand(); lp->av = &(crit->_av); lp->cf = nullptr; ihipPrintKernelLaunch(kernelNameStr, lp, stream); return (stream); } hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, size_t block, grid_launch_parm* lp, const char* kernelNameStr) { stream = ihipSyncAndResolveStream(stream); lp->grid_dim.x = grid; lp->grid_dim.y = 1; lp->grid_dim.z = 1; lp->group_dim.x = block; lp->group_dim.y = 1; lp->group_dim.z = 1; lp->barrier_bit = barrier_bit_queue_default; lp->launch_fence = -1; auto crit = stream->lockopen_preKernelCommand(); lp->av = &(crit->_av); lp->cf = nullptr; ihipPrintKernelLaunch(kernelNameStr, lp, stream); return (stream); } //--- // Called after kernel finishes execution. // This releases the lock on the stream. void ihipPostLaunchKernel(const char* kernelName, hipStream_t stream, grid_launch_parm& lp) { tprintf(DB_SYNC, "ihipPostLaunchKernel, unlocking stream\n"); stream->lockclose_postKernelCommand(kernelName, lp.av); if (HIP_PROFILE_API) { MARKER_END(); } } //================================================================================================= // HIP API Implementation // // Implementor notes: // _ All functions should call HIP_INIT_API as first action: // HIP_INIT_API(<function_arguments>); // // - ALl functions should use ihipLogStatus to return error code (not return error directly). 
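// Minimal sketch of the pattern described above (hipProfilerStart and hipHccGetAccelerator
// later in this file are real instances); the API name and body here are placeholders:
//   hipError_t hipSomeApi(int arg) {
//       HIP_INIT_API(hipSomeApi, arg);   // first action: runtime init + API tracing
//       hipError_t e = hipSuccess;
//       // ... do the work, setting e on failure ...
//       return ihipLogStatus(e);         // log and return, never return the raw error directly
//   }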
//================================================================================================= // //--- //------------------------------------------------------------------------------------------------- const char* ihipErrorString(hipError_t hip_error) { switch (hip_error) { case hipSuccess: return "hipSuccess"; case hipErrorOutOfMemory: return "hipErrorOutOfMemory"; case hipErrorNotInitialized: return "hipErrorNotInitialized"; case hipErrorDeinitialized: return "hipErrorDeinitialized"; case hipErrorProfilerDisabled: return "hipErrorProfilerDisabled"; case hipErrorProfilerNotInitialized: return "hipErrorProfilerNotInitialized"; case hipErrorProfilerAlreadyStarted: return "hipErrorProfilerAlreadyStarted"; case hipErrorProfilerAlreadyStopped: return "hipErrorProfilerAlreadyStopped"; case hipErrorInvalidImage: return "hipErrorInvalidImage"; case hipErrorInvalidContext: return "hipErrorInvalidContext"; case hipErrorContextAlreadyCurrent: return "hipErrorContextAlreadyCurrent"; case hipErrorMapFailed: return "hipErrorMapFailed"; case hipErrorUnmapFailed: return "hipErrorUnmapFailed"; case hipErrorArrayIsMapped: return "hipErrorArrayIsMapped"; case hipErrorAlreadyMapped: return "hipErrorAlreadyMapped"; case hipErrorNoBinaryForGpu: return "hipErrorNoBinaryForGpu"; case hipErrorAlreadyAcquired: return "hipErrorAlreadyAcquired"; case hipErrorNotMapped: return "hipErrorNotMapped"; case hipErrorNotMappedAsArray: return "hipErrorNotMappedAsArray"; case hipErrorNotMappedAsPointer: return "hipErrorNotMappedAsPointer"; case hipErrorECCNotCorrectable: return "hipErrorECCNotCorrectable"; case hipErrorUnsupportedLimit: return "hipErrorUnsupportedLimit"; case hipErrorContextAlreadyInUse: return "hipErrorContextAlreadyInUse"; case hipErrorPeerAccessUnsupported: return "hipErrorPeerAccessUnsupported"; case hipErrorInvalidKernelFile: return "hipErrorInvalidKernelFile"; case hipErrorInvalidGraphicsContext: return "hipErrorInvalidGraphicsContext"; case hipErrorInvalidSource: return "hipErrorInvalidSource"; case hipErrorFileNotFound: return "hipErrorFileNotFound"; case hipErrorSharedObjectSymbolNotFound: return "hipErrorSharedObjectSymbolNotFound"; case hipErrorSharedObjectInitFailed: return "hipErrorSharedObjectInitFailed"; case hipErrorOperatingSystem: return "hipErrorOperatingSystem"; case hipErrorSetOnActiveProcess: return "hipErrorSetOnActiveProcess"; case hipErrorInvalidHandle: return "hipErrorInvalidHandle"; case hipErrorNotFound: return "hipErrorNotFound"; case hipErrorIllegalAddress: return "hipErrorIllegalAddress"; case hipErrorMissingConfiguration: return "hipErrorMissingConfiguration"; case hipErrorMemoryAllocation: return "hipErrorMemoryAllocation"; case hipErrorInitializationError: return "hipErrorInitializationError"; case hipErrorLaunchFailure: return "hipErrorLaunchFailure"; case hipErrorPriorLaunchFailure: return "hipErrorPriorLaunchFailure"; case hipErrorLaunchTimeOut: return "hipErrorLaunchTimeOut"; case hipErrorLaunchOutOfResources: return "hipErrorLaunchOutOfResources"; case hipErrorInvalidDeviceFunction: return "hipErrorInvalidDeviceFunction"; case hipErrorInvalidConfiguration: return "hipErrorInvalidConfiguration"; case hipErrorInvalidDevice: return "hipErrorInvalidDevice"; case hipErrorInvalidValue: return "hipErrorInvalidValue"; case hipErrorInvalidDevicePointer: return "hipErrorInvalidDevicePointer"; case hipErrorInvalidMemcpyDirection: return "hipErrorInvalidMemcpyDirection"; case hipErrorUnknown: return "hipErrorUnknown"; case hipErrorInvalidResourceHandle: return 
"hipErrorInvalidResourceHandle"; case hipErrorNotReady: return "hipErrorNotReady"; case hipErrorNoDevice: return "hipErrorNoDevice"; case hipErrorPeerAccessAlreadyEnabled: return "hipErrorPeerAccessAlreadyEnabled"; case hipErrorPeerAccessNotEnabled: return "hipErrorPeerAccessNotEnabled"; case hipErrorRuntimeMemory: return "hipErrorRuntimeMemory"; case hipErrorRuntimeOther: return "hipErrorRuntimeOther"; case hipErrorHostMemoryAlreadyRegistered: return "hipErrorHostMemoryAlreadyRegistered"; case hipErrorHostMemoryNotRegistered: return "hipErrorHostMemoryNotRegistered"; case hipErrorTbd: return "hipErrorTbd"; default: return "hipErrorUnknown"; }; }; // Returns true if copyEngineCtx can see the memory allocated on dstCtx and srcCtx. // The peer-list for a context controls which contexts have access to the memory allocated on that // context. So we check dstCtx's and srcCtx's peerList to see if the both include thisCtx. bool ihipStream_t::canSeeMemory(const ihipCtx_t* copyEngineCtx, const hc::AmPointerInfo* dstPtrInfo, const hc::AmPointerInfo* srcPtrInfo) { if (copyEngineCtx == nullptr) { return false; } // Make sure this is a device-to-device copy with all memory available to the requested copy // engine // // TODO - pointer-info stores a deviceID not a context,may have some unusual side-effects here: if (dstPtrInfo->_sizeBytes == 0) { return false; } else if (dstPtrInfo->_appId != -1) { #if USE_APP_PTR_FOR_CTX ihipCtx_t* dstCtx = static_cast<ihipCtx_t*>(dstPtrInfo->_appPtr); #else ihipCtx_t* dstCtx = ihipGetPrimaryCtx(dstPtrInfo->_appId); #endif if (copyEngineCtx != dstCtx) { // Only checks peer list if contexts are different LockedAccessor_CtxCrit_t ctxCrit(dstCtx->criticalData()); #if DB_PEER_CTX std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " dstCtx =" << dstCtx << " peerCnt=" << ctxCrit->peerCnt() << "\n"; #endif if (!ctxCrit->isPeerWatcher(copyEngineCtx)) { return false; }; } } // TODO - pointer-info stores a deviceID not a context,may have some unusual side-effects here: if (srcPtrInfo->_sizeBytes == 0) { return false; } else if (srcPtrInfo->_appId != -1) { #if USE_APP_PTR_FOR_CTX ihipCtx_t* srcCtx = static_cast<ihipCtx_t*>(srcPtrInfo->_appPtr); #else ihipCtx_t* srcCtx = ihipGetPrimaryCtx(srcPtrInfo->_appId); #endif if (copyEngineCtx != srcCtx) { // Only checks peer list if contexts are different LockedAccessor_CtxCrit_t ctxCrit(srcCtx->criticalData()); #if DB_PEER_CTX std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " srcCtx =" << srcCtx << " peerCnt=" << ctxCrit->peerCnt() << "\n"; #endif if (!ctxCrit->isPeerWatcher(copyEngineCtx)) { return false; }; } } return true; }; #define CASE_STRING(X) \ case X: \ return #X; \ break; const char* hipMemcpyStr(unsigned memKind) { switch (memKind) { CASE_STRING(hipMemcpyHostToHost); CASE_STRING(hipMemcpyHostToDevice); CASE_STRING(hipMemcpyDeviceToHost); CASE_STRING(hipMemcpyDeviceToDevice); CASE_STRING(hipMemcpyDefault); default: return ("unknown memcpyKind"); }; } const char* hcMemcpyStr(hc::hcCommandKind memKind) { using namespace hc; switch (memKind) { CASE_STRING(hcMemcpyHostToHost); CASE_STRING(hcMemcpyHostToDevice); CASE_STRING(hcMemcpyDeviceToHost); CASE_STRING(hcMemcpyDeviceToDevice); // CASE_STRING(hcMemcpyDefault); default: return ("unknown memcpyKind"); }; } // Resolve hipMemcpyDefault to a known type. 
unsigned ihipStream_t::resolveMemcpyDirection(bool srcInDeviceMem, bool dstInDeviceMem) { hipMemcpyKind kind = hipMemcpyDefault; if (srcInDeviceMem && dstInDeviceMem) { kind = hipMemcpyDeviceToDevice; } if (srcInDeviceMem && !dstInDeviceMem) { kind = hipMemcpyDeviceToHost; } if (!srcInDeviceMem && !dstInDeviceMem) { kind = hipMemcpyHostToHost; } if (!srcInDeviceMem && dstInDeviceMem) { kind = hipMemcpyHostToDevice; } assert(kind != hipMemcpyDefault); return kind; } // hipMemKind must be "resolved" to a specific direction - cannot be default. void ihipStream_t::resolveHcMemcpyDirection(unsigned hipMemKind, const hc::AmPointerInfo* dstPtrInfo, const hc::AmPointerInfo* srcPtrInfo, hc::hcCommandKind* hcCopyDir, ihipCtx_t** copyDevice, bool* forceUnpinnedCopy) { // Ignore what the user tells us and always resolve the direction: // Some apps apparently rely on this. hipMemKind = resolveMemcpyDirection(srcPtrInfo->_isInDeviceMem, dstPtrInfo->_isInDeviceMem); switch (hipMemKind) { case hipMemcpyHostToHost: *hcCopyDir = hc::hcMemcpyHostToHost; break; case hipMemcpyHostToDevice: *hcCopyDir = hc::hcMemcpyHostToDevice; break; case hipMemcpyDeviceToHost: *hcCopyDir = hc::hcMemcpyDeviceToHost; break; case hipMemcpyDeviceToDevice: *hcCopyDir = hc::hcMemcpyDeviceToDevice; break; default: throw ihipException(hipErrorRuntimeOther); }; if (srcPtrInfo->_isInDeviceMem) { *copyDevice = ihipGetPrimaryCtx(srcPtrInfo->_appId); } else if (dstPtrInfo->_isInDeviceMem) { *copyDevice = ihipGetPrimaryCtx(dstPtrInfo->_appId); } else { *copyDevice = nullptr; } *forceUnpinnedCopy = false; if (canSeeMemory(*copyDevice, dstPtrInfo, srcPtrInfo)) { if (HIP_FORCE_P2P_HOST & 0x1) { *forceUnpinnedCopy = true; tprintf(DB_COPY, "Copy engine (dev:%d agent=0x%lx) can see src and dst but " "HIP_FORCE_P2P_HOST=0, forcing copy through staging buffers.\n", *copyDevice ? (*copyDevice)->getDeviceNum() : -1, *copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0); } else { tprintf(DB_COPY, "Copy engine (dev:%d agent=0x%lx) can see src and dst.\n", *copyDevice ? (*copyDevice)->getDeviceNum() : -1, *copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0); } } else { *forceUnpinnedCopy = true; tprintf(DB_COPY, "Copy engine(dev:%d agent=0x%lx) cannot see both host and device pointers - " "forcing copy with unpinned engine.\n", *copyDevice ? (*copyDevice)->getDeviceNum() : -1, *copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0); if (HIP_FAIL_SOC & 0x2) { fprintf(stderr, "HIP_FAIL_SOC: P2P: copy engine(dev:%d agent=0x%lx) cannot see both host and " "device pointers - forcing copy with unpinned engine.\n", *copyDevice ? (*copyDevice)->getDeviceNum() : -1, *copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0); throw ihipException(hipErrorRuntimeOther); } } } void printPointerInfo(unsigned dbFlag, const char* tag, const void* ptr, const hc::AmPointerInfo& ptrInfo) { tprintf(dbFlag, " %s=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d " "registered=%d allocSeqNum=%zu, appAllocationFlags=%x, appPtr=%p\n", tag, ptr, ptrInfo._hostPointer, ptrInfo._devicePointer, ptrInfo._sizeBytes, ptrInfo._appId, ptrInfo._sizeBytes != 0, ptrInfo._isInDeviceMem, !ptrInfo._isAmManaged, ptrInfo._allocSeqNum, ptrInfo._appAllocationFlags, ptrInfo._appPtr); } // the pointer-info as returned by HC refers to the allocation // This routine modifies the pointer-info so it appears to refer to the specific ptr and sizeBytes. // TODO -remove this when HCC uses HSA pointer info functions directly. 
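// Worked example with illustrative numbers only: for an allocation with base 0x1000 and
// _sizeBytes 0x400, calling tailorPtrInfo(&info, (char*)0x1000 + 0x100, 0x40) advances
// _devicePointer/_hostPointer by the 0x100 offset and shrinks _sizeBytes to 0x40, so the copy
// logic sees exactly the sub-range being transferred rather than the whole allocation.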
void tailorPtrInfo(hc::AmPointerInfo* ptrInfo, const void* ptr, size_t sizeBytes) { const char* ptrc = static_cast<const char*>(ptr); if (ptrInfo->_sizeBytes == 0) { // invalid ptrInfo, don't modify return; } else if (ptrInfo->_isInDeviceMem) { assert(ptrInfo->_devicePointer != nullptr); std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_devicePointer); // TODO : assert-> runtime assert that only appears in debug mode assert(diff >= 0); assert(diff <= ptrInfo->_sizeBytes); ptrInfo->_devicePointer = const_cast<void*>(ptr); if (ptrInfo->_hostPointer != nullptr) { ptrInfo->_hostPointer = static_cast<char*>(ptrInfo->_hostPointer) + diff; } } else { assert(ptrInfo->_hostPointer != nullptr); std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_hostPointer); // TODO : assert-> runtime assert that only appears in debug mode assert(diff >= 0); assert(diff <= ptrInfo->_sizeBytes); ptrInfo->_hostPointer = const_cast<void*>(ptr); if (ptrInfo->_devicePointer != nullptr) { ptrInfo->_devicePointer = static_cast<char*>(ptrInfo->_devicePointer) + diff; } } assert(sizeBytes <= ptrInfo->_sizeBytes); ptrInfo->_sizeBytes = sizeBytes; }; bool getTailoredPtrInfo(const char* tag, hc::AmPointerInfo* ptrInfo, const void* ptr, size_t sizeBytes) { bool tracked = (hc::am_memtracker_getinfo(ptrInfo, ptr) == AM_SUCCESS); printPointerInfo(DB_COPY, tag, ptr, *ptrInfo); if (tracked) { tailorPtrInfo(ptrInfo, ptr, sizeBytes); printPointerInfo(DB_COPY, " mod", ptr, *ptrInfo); } return tracked; }; // TODO : For registered and host memory, if the portable flag is set, we need to recognize that and // perform appropriate copy operation. What can happen now is that Portable memory is mapped into // multiple devices but Peer access is not enabled. i The peer detection logic doesn't see that the // memory is already mapped and so tries to use an unpinned copy algorithm. If this is PinInPlace, // then an error can occur. Need to track Portable flag correctly or use new RT functionality to // query the peer status for the pointer. // // TODO - remove kind parm from here or use it below? void ihipStream_t::locked_copySync(void* dst, const void* src, size_t sizeBytes, unsigned kind, bool resolveOn) { ihipCtx_t* ctx = this->getCtx(); const ihipDevice_t* device = ctx->getDevice(); if (device == NULL) { throw ihipException(hipErrorInvalidDevice); } hc::accelerator acc; #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes); bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes); // Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not // valid, so check it here: if (!dstTracked) { assert(dstPtrInfo._sizeBytes == 0); } if (!srcTracked) { assert(srcPtrInfo._sizeBytes == 0); } hc::hcCommandKind hcCopyDir; ihipCtx_t* copyDevice; bool forceUnpinnedCopy; resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, &copyDevice, &forceUnpinnedCopy); { LockedAccessor_StreamCrit_t crit(_criticalData); tprintf(DB_COPY, "copySync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, " "isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n", copyDevice ? 
copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId, dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem, sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy); printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo); printPointerInfo(DB_COPY, " src", src, srcPtrInfo); crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo, copyDevice ? &copyDevice->getDevice()->_acc : nullptr, forceUnpinnedCopy); } } void ihipStream_t::locked_copy2DSync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind, bool resolveOn) { ihipCtx_t* ctx = this->getCtx(); const ihipDevice_t* device = ctx->getDevice(); if (device == NULL) { throw ihipException(hipErrorInvalidDevice); } size_t sizeBytes = width*height; hc::accelerator acc; #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes); bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes); // Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not // // valid, so check it here: if (!dstTracked) { assert(dstPtrInfo._sizeBytes == 0); } if (!srcTracked) { assert(srcPtrInfo._sizeBytes == 0); } hc::hcCommandKind hcCopyDir; ihipCtx_t* copyDevice; bool forceUnpinnedCopy; resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, &copyDevice, &forceUnpinnedCopy); { LockedAccessor_StreamCrit_t crit(_criticalData); tprintf(DB_COPY, "copy2DSync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, " "isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n", copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId, dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem, sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy); printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo); printPointerInfo(DB_COPY, " src", src, srcPtrInfo); crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo, copyDevice ? 
&copyDevice->getDevice()->_acc : nullptr, forceUnpinnedCopy); } } void ihipStream_t::addSymbolPtrToTracker(hc::accelerator& acc, void* ptr, size_t sizeBytes) { #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo ptrInfo(NULL, ptr, ptr, sizeBytes, acc, true, false); #else hc::AmPointerInfo ptrInfo(NULL, ptr, sizeBytes, acc, true, false); #endif hc::am_memtracker_add(ptr, ptrInfo); } void ihipStream_t::lockedSymbolCopySync(hc::accelerator& acc, void* dst, void* src, size_t sizeBytes, size_t offset, unsigned kind) { if (kind == hipMemcpyHostToHost) { acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyHostToHost); } if (kind == hipMemcpyHostToDevice) { acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset); } if (kind == hipMemcpyDeviceToDevice) { acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyDeviceToDevice); } if (kind == hipMemcpyDeviceToHost) { acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset, Kalmar::hcMemcpyDeviceToHost); } } void ihipStream_t::lockedSymbolCopyAsync(hc::accelerator& acc, void* dst, void* src, size_t sizeBytes, size_t offset, unsigned kind) { // TODO - review - this looks broken , should not be adding pointers to tracker dynamically: if (kind == hipMemcpyHostToDevice) { #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif bool srcTracked = (hc::am_memtracker_getinfo(&srcPtrInfo, src) == AM_SUCCESS); if (srcTracked) { addSymbolPtrToTracker(acc, dst, sizeBytes); locked_getAv()->copy_async((void*)src, dst, sizeBytes); } else { LockedAccessor_StreamCrit_t crit(_criticalData); this->wait(crit); acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset); } } if (kind == hipMemcpyDeviceToHost) { #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif bool dstTracked = (hc::am_memtracker_getinfo(&dstPtrInfo, dst) == AM_SUCCESS); if (dstTracked) { addSymbolPtrToTracker(acc, src, sizeBytes); locked_getAv()->copy_async((void*)src, dst, sizeBytes); } else { LockedAccessor_StreamCrit_t crit(_criticalData); this->wait(crit); acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset, Kalmar::hcMemcpyDeviceToHost); } } } void ihipStream_t::locked_copyAsync(void* dst, const void* src, size_t sizeBytes, unsigned kind) { const ihipCtx_t* ctx = this->getCtx(); if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) { tprintf(DB_COPY, "locked_copyAsync bad ctx or device\n"); throw ihipException(hipErrorInvalidDevice); } if (kind == hipMemcpyHostToHost) { tprintf(DB_COPY, "locked_copyAsync: H2H with memcpy"); // TODO - consider if we want to perhaps use the GPU SDMA engines anyway, to avoid the // host-side sync here and keep everything flowing on the GPU. /* As this is a CPU op, we need to wait until all the commands in current stream are finished. 
*/ LockedAccessor_StreamCrit_t crit(_criticalData); this->wait(crit); memcpy(dst, src, sizeBytes); } else { hc::accelerator acc; #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif tprintf(DB_COPY, "copyASync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes); bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes); bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes); hc::hcCommandKind hcCopyDir; ihipCtx_t* copyDevice; bool forceUnpinnedCopy; resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, &copyDevice, &forceUnpinnedCopy); tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n", copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy); // "tracked" really indicates if the pointer's virtual address is available in the GPU // address space. If both pointers are not tracked, we need to fall back to a sync copy. if (dstTracked && srcTracked && !forceUnpinnedCopy && copyDevice /*code below assumes this is !nullptr*/) { LockedAccessor_StreamCrit_t crit(_criticalData); // Perform fast asynchronous copy - we know copyDevice != NULL based on check above try { if (HIP_FORCE_SYNC_COPY) { crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo, &copyDevice->getDevice()->_acc, forceUnpinnedCopy); } else { crit->_av.copy_async_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo, &copyDevice->getDevice()->_acc); } } catch (Kalmar::runtime_exception) { throw ihipException(hipErrorRuntimeOther); }; if (HIP_API_BLOCKING) { tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpyAsync(sz=%zu)\n", ToString(this).c_str(), sizeBytes); this->wait(crit); } } else { if (HIP_FAIL_SOC & 0x1) { fprintf(stderr, "HIP_FAIL_SOC failed, async_copy requested but could not be completed " "since src or dst not accesible to copy agent\n"); fprintf(stderr, "copyASync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) " "src=%p(phys_dev:%d, isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n", copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId, dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem, sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy); fprintf( stderr, " dst=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n", dst, dstPtrInfo._hostPointer, dstPtrInfo._devicePointer, dstPtrInfo._sizeBytes, dstPtrInfo._appId, dstTracked, dstPtrInfo._isInDeviceMem); fprintf( stderr, " src=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n", src, srcPtrInfo._hostPointer, srcPtrInfo._devicePointer, srcPtrInfo._sizeBytes, srcPtrInfo._appId, srcTracked, srcPtrInfo._isInDeviceMem); throw ihipException(hipErrorRuntimeOther); } // Perform slow synchronous copy: LockedAccessor_StreamCrit_t crit(_criticalData); crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo, copyDevice ? 
&copyDevice->getDevice()->_acc : nullptr, forceUnpinnedCopy); } } } void ihipStream_t::locked_copy2DAsync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind) { const ihipCtx_t* ctx = this->getCtx(); if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) { tprintf(DB_COPY, "locked_copy2DAsync bad ctx or device\n"); throw ihipException(hipErrorInvalidDevice); } hc::accelerator acc; size_t sizeBytes = width*height; #if (__hcc_workweek__ >= 17332) hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0); #else hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0); hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0); #endif tprintf(DB_COPY, "copy2DAsync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes); bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes); bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes); hc::hcCommandKind hcCopyDir; ihipCtx_t* copyDevice; bool forceUnpinnedCopy; resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, &copyDevice, &forceUnpinnedCopy); tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n", copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy); if (dstTracked && srcTracked && !forceUnpinnedCopy && copyDevice /*code below assumes this is !nullptr*/) { LockedAccessor_StreamCrit_t crit(_criticalData); try { if (HIP_FORCE_SYNC_COPY) { crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo, &copyDevice->getDevice()->_acc, forceUnpinnedCopy); } else { crit->_av.copy2d_async_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo, &copyDevice->getDevice()->_acc); } } catch (Kalmar::runtime_exception) { throw ihipException(hipErrorRuntimeOther); }; if (HIP_API_BLOCKING) { tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpy2DAsync(sz=%zu)\n", ToString(this).c_str(), sizeBytes); this->wait(crit); } } else { //Do sync 2D copy LockedAccessor_StreamCrit_t crit(_criticalData); crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo, copyDevice ? 
&copyDevice->getDevice()->_acc : nullptr, forceUnpinnedCopy); } } //------------------------------------------------------------------------------------------------- //------------------------------------------------------------------------------------------------- // Profiler, really these should live elsewhere: hipError_t hipProfilerStart() { HIP_INIT_API(hipProfilerStart); #if COMPILE_HIP_ATP_MARKER amdtResumeProfiling(AMDT_ALL_PROFILING); #endif return ihipLogStatus(hipSuccess); }; hipError_t hipProfilerStop() { HIP_INIT_API(hipProfilerStop); #if COMPILE_HIP_ATP_MARKER amdtStopProfiling(AMDT_ALL_PROFILING); #endif return ihipLogStatus(hipSuccess); }; //------------------------------------------------------------------------------------------------- //------------------------------------------------------------------------------------------------- // HCC-specific accessor functions: //--- hipError_t hipHccGetAccelerator(int deviceId, hc::accelerator* acc) { HIP_INIT_API(hipHccGetAccelerator, deviceId, acc); const ihipDevice_t* device = ihipGetDevice(deviceId); hipError_t err; if (device == NULL) { err = hipErrorInvalidDevice; } else { *acc = device->_acc; err = hipSuccess; } return ihipLogStatus(err); } //--- hipError_t hipHccGetAcceleratorView(hipStream_t stream, hc::accelerator_view** av) { HIP_INIT_API(hipHccGetAcceleratorView, stream, av); if (stream == hipStreamNull) { ihipCtx_t* device = ihipGetTlsDefaultCtx(); stream = device->_defaultStream; } *av = stream->locked_getAv(); // TODO - review. hipError_t err = hipSuccess; return ihipLogStatus(err); } //// TODO - add identifier numbers for streams and devices to help with debugging. // TODO - add a contect sequence number for debug. Print operator<< ctx:0.1 (device.ctx) namespace hip_impl { std::vector<hsa_agent_t> all_hsa_agents() { std::vector<hsa_agent_t> r{}; for (auto&& acc : hc::accelerator::get_all()) { const auto agent = acc.get_hsa_agent(); if (!agent || !acc.is_hsa_accelerator()) continue; r.emplace_back(*static_cast<hsa_agent_t*>(agent)); } return r; } [[noreturn]] void hip_throw(const std::exception& ex) { #if defined(__cpp_exceptions) throw ex; #else std::cerr << ex.what() << std::endl; std::terminate(); #endif } } // Namespace hip_impl.
1
7,442
Why is this necessary? Just in order to get an indexed loop?
ROCm-Developer-Tools-HIP
cpp
@@ -68,6 +68,7 @@ type poolSyncMetrics struct { zpoolLastSyncTime *prometheus.GaugeVec zpoolStateUnknown *prometheus.GaugeVec zpoolLastSyncTimeCommandError *prometheus.GaugeVec + zpoolListRequestRejectCounter prometheus.Gauge } // poolfields struct is for pool last sync time metric
1
// Copyright © 2017-2019 The OpenEBS Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zvol import ( "os" "strconv" "strings" zpool "github.com/openebs/maya/pkg/zpool/v1alpha1" "github.com/prometheus/client_golang/prometheus" ) type metrics struct { readBytes *prometheus.GaugeVec writeBytes *prometheus.GaugeVec readCount *prometheus.GaugeVec writeCount *prometheus.GaugeVec syncCount *prometheus.GaugeVec syncLatency *prometheus.GaugeVec readLatency *prometheus.GaugeVec writeLatency *prometheus.GaugeVec replicaStatus *prometheus.GaugeVec inflightIOCount *prometheus.GaugeVec dispatchedIOCount *prometheus.GaugeVec rebuildCount *prometheus.GaugeVec rebuildBytes *prometheus.GaugeVec rebuildStatus *prometheus.GaugeVec rebuildDoneCount *prometheus.GaugeVec rebuildFailedCount *prometheus.GaugeVec zfsCommandErrorCounter prometheus.Gauge zfsStatsParseErrorCounter prometheus.Gauge zfsStatsRejectRequestCounter prometheus.Gauge zfsStatsNoDataSetAvailableErrorCounter prometheus.Gauge zfsStatsInitializeLibuzfsClientErrorCounter prometheus.Gauge } type listMetrics struct { used *prometheus.GaugeVec available *prometheus.GaugeVec zfsListParseErrorCounter prometheus.Gauge zfsListCommandErrorCounter prometheus.Gauge zfsListRequestRejectCounter prometheus.Gauge zfsListNoDataSetAvailableErrorCounter prometheus.Gauge zfsListInitializeLibuzfsClientErrorCounter prometheus.Gauge } type poolSyncMetrics struct { zpoolLastSyncTime *prometheus.GaugeVec zpoolStateUnknown *prometheus.GaugeVec zpoolLastSyncTimeCommandError *prometheus.GaugeVec } // poolfields struct is for pool last sync time metric type poolfields struct { name string zpoolLastSyncTime float64 zpoolStateUnknown float64 zpoolLastSyncTimeCommandError float64 } type fields struct { name string used float64 available float64 } func newPoolMetrics() *poolSyncMetrics { return new(poolSyncMetrics) } // newMetrics initializes fields of the metrics and returns its instance func newListMetrics() *listMetrics { return new(listMetrics) } func (l *listMetrics) withUsedSize() *listMetrics { l.used = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "volume_replica_used_size", Help: "Used size of volume replica on a pool", }, []string{"name"}, ) return l } func (l *listMetrics) withAvailableSize() *listMetrics { l.available = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "volume_replica_available_size", Help: "Available size of volume replica on a pool", }, []string{"name"}, ) return l } func (l *listMetrics) withParseErrorCounter() *listMetrics { l.zfsListParseErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_list_parse_error", Help: "Total no of zfs list parse errors", }, ) return l } func (l *listMetrics) withCommandErrorCounter() *listMetrics { l.zfsListCommandErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_list_command_error", Help: "Total no of zfs command errors", }, ) return l } func (l *listMetrics) 
withRequestRejectCounter() *listMetrics { l.zfsListRequestRejectCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_list_request_reject_count", Help: "Total no of rejected requests of zfs list", }, ) return l } func (l *listMetrics) withNoDatasetAvailableErrorCounter() *listMetrics { l.zfsListNoDataSetAvailableErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_list_no_dataset_available_error_counter", Help: "Total no of no datasets error in zfs list command", }, ) return l } func (l *listMetrics) withInitializeLibuzfsClientErrorCounter() *listMetrics { l.zfsListInitializeLibuzfsClientErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_list_failed_to_initialize_libuzfs_client_error_counter", Help: "Total no of failed to initialize libuzfs client error in zfs list command", }, ) return l } // newMetrics initializes fields of the metrics and returns its instance func newMetrics() *metrics { return new(metrics) } func (m *metrics) withReadBytes() *metrics { m.readBytes = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_read_bytes", Help: "Total read in bytes of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withWriteBytes() *metrics { m.writeBytes = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_write_bytes", Help: "Total write in bytes of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withReadCount() *metrics { m.readCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_read_count", Help: "Total read io count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withWriteCount() *metrics { m.writeCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_write_count", Help: "Total write io count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withSyncCount() *metrics { m.syncCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "sync_count", Help: "Total sync io count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withSyncLatency() *metrics { m.syncLatency = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "sync_latency", Help: "Sync latency on volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withReadLatency() *metrics { m.readLatency = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "read_latency", Help: "Read latency on volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withWriteLatency() *metrics { m.writeLatency = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "write_latency", Help: "Write latency on volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withReplicaStatus() *metrics { m.replicaStatus = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "replica_status", Help: `Status of volume replica (0, 1, 2, 3) = {"Offline", "Healthy", "Degraded", "Rebuilding"}`, }, []string{"vol", "pool"}, ) return m } func (m *metrics) withinflightIOCount() *metrics { m.inflightIOCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "inflight_io_count", Help: "Inflight IO's count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withDispatchedIOCount() *metrics { 
m.dispatchedIOCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "dispatched_io_count", Help: "Dispatched IO's count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withRebuildCount() *metrics { m.rebuildCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "rebuild_count", Help: "Rebuild count of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withRebuildBytes() *metrics { m.rebuildBytes = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "rebuild_bytes", Help: "Rebuild bytes of volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withRebuildStatus() *metrics { m.rebuildStatus = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "rebuild_status", Help: `Status of rebuild on volume replica (0, 1, 2, 3, 4, 5, 6)= {"INIT", "DONE", "SNAP REBUILD INPROGRESS", "ACTIVE DATASET REBUILD INPROGRESS", "ERRORED", "FAILED", "UNKNOWN"}`, }, []string{"vol", "pool"}, ) return m } func (m *metrics) withRebuildDone() *metrics { m.rebuildDoneCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_rebuild_done", Help: "Total no of rebuild done on volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withFailedRebuild() *metrics { m.rebuildFailedCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "total_failed_rebuild", Help: "Total no of failed rebuilds on volume replica", }, []string{"vol", "pool"}, ) return m } func (m *metrics) withCommandErrorCounter() *metrics { m.zfsCommandErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_stats_command_error", Help: "Total no of zfs command errors", }, ) return m } func (m *metrics) withParseErrorCounter() *metrics { m.zfsStatsParseErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_stats_parse_error_counter", Help: "Total no of zfs stats parse errors", }, ) return m } func (m *metrics) withRequestRejectCounter() *metrics { m.zfsStatsRejectRequestCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_stats_reject_request_count", Help: "Total no of rejected requests of zfs stats", }, ) return m } func (m *metrics) withNoDatasetAvailableErrorCounter() *metrics { m.zfsStatsNoDataSetAvailableErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_stats_no_dataset_available_error_counter", Help: "Total no of no datasets error in zfs stats command", }, ) return m } func (m *metrics) withInitializeLibuzfsClientErrorCounter() *metrics { m.zfsStatsInitializeLibuzfsClientErrorCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zfs_stats_failed_to_initialize_libuzfs_client_error_counter", Help: "Total no of failed to initialize libuzfs client error in zfs stats command", }, ) return m } // All new metrics related to pool last sync time func (p *poolSyncMetrics) withZpoolStateUnknown() *poolSyncMetrics { p.zpoolStateUnknown = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zpool_state_unknown", Help: "zpool state unknown", }, []string{"pool"}, ) return p } func (p *poolSyncMetrics) withzpoolLastSyncTimeCommandError() *poolSyncMetrics { p.zpoolLastSyncTimeCommandError = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zpool_sync_time_command_error", Help: "Zpool sync time command error", 
}, []string{"pool"}, ) return p } func (p *poolSyncMetrics) withZpoolLastSyncTime() *poolSyncMetrics { p.zpoolLastSyncTime = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "openebs", Name: "zpool_last_sync_time", Help: "Last sync time of pool", }, []string{"pool"}, ) return p } func parseFloat64(e string, m *listMetrics) float64 { num, err := strconv.ParseFloat(e, 64) if err != nil { m.zfsListParseErrorCounter.Inc() } return num } func listParser(stdout []byte, m *listMetrics) []fields { if len(string(stdout)) == 0 { m.zfsListParseErrorCounter.Inc() return nil } list := make([]fields, 0) vols := strings.Split(string(stdout), "\n") for _, v := range vols { f := strings.Fields(v) if len(f) < 3 { break } vol := fields{ name: f[0], used: parseFloat64(f[1], m), available: parseFloat64(f[2], m), } list = append(list, vol) } return list } // poolMetricParser is used to parse output from zfs get io.openebs:livenesstimestamp func poolMetricParser(stdout []byte) *poolfields { if len(string(stdout)) == 0 { pool := poolfields{ name: os.Getenv("HOSTNAME"), zpoolLastSyncTime: zpool.ZpoolLastSyncCommandErrorOrUnknownUnset, zpoolLastSyncTimeCommandError: zpool.ZpoolLastSyncCommandErrorOrUnknownUnset, zpoolStateUnknown: zpool.ZpoolLastSyncCommandErrorOrUnknownSet, } return &pool } pools := strings.Split(string(stdout), "\n") f := strings.Fields(pools[0]) if len(f) < 2 { return nil } pool := poolfields{ name: f[0], zpoolLastSyncTime: poolSyncTimeParseFloat64(f[2]), zpoolStateUnknown: zpool.ZpoolLastSyncCommandErrorOrUnknownUnset, zpoolLastSyncTimeCommandError: zpool.ZpoolLastSyncCommandErrorOrUnknownUnset, } return &pool } // poolSyncTimeParseFloat64 is used to convert epoch timestamp in string to float64 func poolSyncTimeParseFloat64(e string) float64 { num, err := strconv.ParseFloat(e, 64) if err != nil { return 0 } return num }
1
18,124
Instead of using `prometheus.Gauge`, using `prometheus.Counter` would be better.
openebs-maya
go
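A minimal sketch of the Counter-versus-Gauge distinction the review comment above is pointing at. It uses the Python prometheus_client library as a stand-in for the Go client in the record, and the second metric name is invented for illustration: a Counter only ever increases, which is the right model for a cumulative reject/error total, while a Gauge is for values that can also go down.

from prometheus_client import Counter, Gauge

# Cumulative totals such as rejected zfs list requests only grow,
# so a Counter models them better than a Gauge.
reject_total = Counter(
    "zfs_list_request_reject_count",
    "Total no of rejected requests of zfs list",
    namespace="openebs",
)
reject_total.inc()  # a Counter can only move up

# A Gauge fits quantities that rise and fall, e.g. requests in flight
# (this metric name is hypothetical, not taken from the record above).
inflight = Gauge(
    "zfs_list_inflight_requests",
    "Number of zfs list requests currently being served",
    namespace="openebs",
)
inflight.inc()
inflight.dec()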
@@ -23,5 +23,9 @@ module Ncr def whsc? code == WHSC_CODE end + + def ba_6x_tier1_team? + code.match(/^P11[7J4T1ACZ]....$/) + end end end
1
module Ncr class Organization < ActiveRecord::Base WHSC_CODE = "P1122021" OOL_CODES = [ "P1171001", "P1172001", "P1173001", ] has_many :ncr_work_orders, class_name: Ncr::WorkOrder, foreign_key: "ncr_organization_id" validates :code, presence: true, uniqueness: true validates :name, presence: true def code_and_name "#{code} #{name}" end def ool? OOL_CODES.include?(code) end def whsc? code == WHSC_CODE end end end
1
16,522
Maybe we should have a unit test for this and then just test one case in `spec/services/ncr/approval_manager_spec.rb`?
18F-C2
rb
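A small sketch of the behaviour the suggested unit test would pin down, written with Python's re module rather than Ruby. The pattern is the one added in the diff above; P1171001 and P1122021 come from the record itself, while P11A9999 is an invented example code.

import re

# "P11" followed by one of 7, J, 4, T, 1, A, C, Z, then exactly four more characters.
BA_6X_TIER1 = re.compile(r"^P11[7J4T1ACZ]....$")

for code in ("P1171001", "P1122021", "P11A9999"):
    print(code, bool(BA_6X_TIER1.match(code)))
# P1171001 and P11A9999 match; P1122021 (the WHSC code) does not.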
@@ -21,8 +21,9 @@ class CommunicartsController < ApplicationController def approval_response cart = Cart.find(params[:cart_id]).decorate client_data = cart.proposal.client_data_legacy - approval = cart.approvals.find_by(user_id: user_id) - + approval = cart.approvals.find_by(user_id: user_id) + @token ||= ApiToken.find_by(approval_id: approval.id) + if !approval.pending? flash[:error] = "You have already logged a response for Cart #{client_data.public_identifier}" else
1
require ::File.expand_path('authentication_error.rb', 'lib/errors') require ::File.expand_path('approval_group_error.rb', 'lib/errors') class CommunicartsController < ApplicationController skip_before_action :verify_authenticity_token before_filter :validate_access, only: :approval_response rescue_from AuthenticationError do |exception| authentication_error(exception) end rescue_from ApprovalGroupError, with: :approval_group_error def send_cart cart = Commands::Approval::InitiateCartApproval.new.perform(params) jcart = cart.as_json render json: jcart, status: 201 end def approval_response cart = Cart.find(params[:cart_id]).decorate client_data = cart.proposal.client_data_legacy approval = cart.approvals.find_by(user_id: user_id) if !approval.pending? flash[:error] = "You have already logged a response for Cart #{client_data.public_identifier}" else case params[:approver_action] when 'approve' approval.approve! flash[:success] = "You have approved Cart #{client_data.public_identifier}." when 'reject' approval.reject! flash[:success] = "You have rejected Cart #{client_data.public_identifier}." end end if @token @token.use! end redirect_to cart_path(cart) end private def validate_access return if signed_in? @token = ApiToken.find_by(access_token: params[:cch]) if !@token raise AuthenticationError.new(msg: 'something went wrong with the token (nonexistent)') elsif @token.expires_at && @token.expires_at < Time.now raise AuthenticationError.new(msg: 'something went wrong with the token (expired)') elsif @token.used? raise AuthenticationError.new(msg: 'Something went wrong with the token. It has already been used.') elsif @token.cart_id != params[:cart_id].to_i raise AuthenticationError.new(msg: 'Something went wrong with the cart (wrong cart)') else sign_in(@token.user) end end def user_id if signed_in? current_user.id else @token.user_id end end def authentication_error(e) flash[:error] = e.message redirect_to "/498.html" end def approval_group_error(error) render json: { message: error.to_s }, status: 400 end end
1
12,719
Why is this necessary?
18F-C2
rb
@@ -320,7 +320,7 @@ public class Spark3Util { } } - private static DistributionMode getDistributionMode(org.apache.iceberg.Table table) { + public static DistributionMode getDistributionMode(org.apache.iceberg.Table table) { boolean isSortedTable = !table.sortOrder().isUnsorted(); String defaultModeName = isSortedTable ? WRITE_DISTRIBUTION_MODE_RANGE : WRITE_DISTRIBUTION_MODE_DEFAULT; String modeName = table.properties().getOrDefault(WRITE_DISTRIBUTION_MODE, defaultModeName);
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.apache.iceberg.DistributionMode; import org.apache.iceberg.MetadataTableType; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.TableProperties; import org.apache.iceberg.UpdateProperties; import org.apache.iceberg.UpdateSchema; import org.apache.iceberg.catalog.Namespace; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.expressions.BoundPredicate; import org.apache.iceberg.expressions.ExpressionVisitors; import org.apache.iceberg.expressions.Term; import org.apache.iceberg.expressions.UnboundPredicate; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.relocated.com.google.common.base.Joiner; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.spark.source.SparkTable; import org.apache.iceberg.transforms.PartitionSpecVisitor; import org.apache.iceberg.transforms.SortOrderVisitor; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; import org.apache.iceberg.util.ArrayUtil; import org.apache.iceberg.util.Pair; import org.apache.iceberg.util.PropertyUtil; import org.apache.iceberg.util.SortOrderUtil; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.apache.spark.sql.catalyst.parser.ParseException; import org.apache.spark.sql.catalyst.parser.ParserInterface; import org.apache.spark.sql.connector.catalog.CatalogManager; import org.apache.spark.sql.connector.catalog.CatalogPlugin; import org.apache.spark.sql.connector.catalog.Identifier; import org.apache.spark.sql.connector.catalog.Table; import org.apache.spark.sql.connector.catalog.TableCatalog; import org.apache.spark.sql.connector.catalog.TableChange; import org.apache.spark.sql.connector.expressions.Expression; import org.apache.spark.sql.connector.expressions.Expressions; import org.apache.spark.sql.connector.expressions.Literal; import org.apache.spark.sql.connector.expressions.Transform; import org.apache.spark.sql.connector.iceberg.distributions.Distribution; import org.apache.spark.sql.connector.iceberg.distributions.Distributions; import 
org.apache.spark.sql.connector.iceberg.distributions.OrderedDistribution; import org.apache.spark.sql.connector.iceberg.expressions.SortOrder; import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation; import org.apache.spark.sql.types.IntegerType; import org.apache.spark.sql.types.LongType; import org.apache.spark.sql.util.CaseInsensitiveStringMap; import scala.Some; import scala.collection.JavaConverters; import scala.collection.Seq; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_DEFAULT; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE; public class Spark3Util { private static final Set<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs"); private static final Set<String> RESERVED_PROPERTIES = ImmutableSet.of( TableCatalog.PROP_LOCATION, TableCatalog.PROP_PROVIDER); private static final Joiner DOT = Joiner.on("."); private Spark3Util() { } public static Map<String, String> rebuildCreateProperties(Map<String, String> createProperties) { ImmutableMap.Builder<String, String> tableProperties = ImmutableMap.builder(); createProperties.entrySet().stream() .filter(entry -> !RESERVED_PROPERTIES.contains(entry.getKey())) .forEach(tableProperties::put); String provider = createProperties.get(TableCatalog.PROP_PROVIDER); if ("parquet".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "parquet"); } else if ("avro".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "avro"); } else if ("orc".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "orc"); } else if (provider != null && !"iceberg".equalsIgnoreCase(provider)) { throw new IllegalArgumentException("Unsupported format in USING: " + provider); } return tableProperties.build(); } /** * Applies a list of Spark table changes to an {@link UpdateProperties} operation. * * @param pendingUpdate an uncommitted UpdateProperties operation to configure * @param changes a list of Spark table changes * @return the UpdateProperties operation configured with the changes */ public static UpdateProperties applyPropertyChanges(UpdateProperties pendingUpdate, List<TableChange> changes) { for (TableChange change : changes) { if (change instanceof TableChange.SetProperty) { TableChange.SetProperty set = (TableChange.SetProperty) change; pendingUpdate.set(set.property(), set.value()); } else if (change instanceof TableChange.RemoveProperty) { TableChange.RemoveProperty remove = (TableChange.RemoveProperty) change; pendingUpdate.remove(remove.property()); } else { throw new UnsupportedOperationException("Cannot apply unknown table change: " + change); } } return pendingUpdate; } /** * Applies a list of Spark table changes to an {@link UpdateSchema} operation. 
* * @param pendingUpdate an uncommitted UpdateSchema operation to configure * @param changes a list of Spark table changes * @return the UpdateSchema operation configured with the changes */ public static UpdateSchema applySchemaChanges(UpdateSchema pendingUpdate, List<TableChange> changes) { for (TableChange change : changes) { if (change instanceof TableChange.AddColumn) { apply(pendingUpdate, (TableChange.AddColumn) change); } else if (change instanceof TableChange.UpdateColumnType) { TableChange.UpdateColumnType update = (TableChange.UpdateColumnType) change; Type newType = SparkSchemaUtil.convert(update.newDataType()); Preconditions.checkArgument(newType.isPrimitiveType(), "Cannot update '%s', not a primitive type: %s", DOT.join(update.fieldNames()), update.newDataType()); pendingUpdate.updateColumn(DOT.join(update.fieldNames()), newType.asPrimitiveType()); } else if (change instanceof TableChange.UpdateColumnComment) { TableChange.UpdateColumnComment update = (TableChange.UpdateColumnComment) change; pendingUpdate.updateColumnDoc(DOT.join(update.fieldNames()), update.newComment()); } else if (change instanceof TableChange.RenameColumn) { TableChange.RenameColumn rename = (TableChange.RenameColumn) change; pendingUpdate.renameColumn(DOT.join(rename.fieldNames()), rename.newName()); } else if (change instanceof TableChange.DeleteColumn) { TableChange.DeleteColumn delete = (TableChange.DeleteColumn) change; pendingUpdate.deleteColumn(DOT.join(delete.fieldNames())); } else if (change instanceof TableChange.UpdateColumnNullability) { TableChange.UpdateColumnNullability update = (TableChange.UpdateColumnNullability) change; if (update.nullable()) { pendingUpdate.makeColumnOptional(DOT.join(update.fieldNames())); } else { pendingUpdate.requireColumn(DOT.join(update.fieldNames())); } } else if (change instanceof TableChange.UpdateColumnPosition) { apply(pendingUpdate, (TableChange.UpdateColumnPosition) change); } else { throw new UnsupportedOperationException("Cannot apply unknown table change: " + change); } } return pendingUpdate; } private static void apply(UpdateSchema pendingUpdate, TableChange.UpdateColumnPosition update) { Preconditions.checkArgument(update.position() != null, "Invalid position: null"); if (update.position() instanceof TableChange.After) { TableChange.After after = (TableChange.After) update.position(); String referenceField = peerName(update.fieldNames(), after.column()); pendingUpdate.moveAfter(DOT.join(update.fieldNames()), referenceField); } else if (update.position() instanceof TableChange.First) { pendingUpdate.moveFirst(DOT.join(update.fieldNames())); } else { throw new IllegalArgumentException("Unknown position for reorder: " + update.position()); } } private static void apply(UpdateSchema pendingUpdate, TableChange.AddColumn add) { Preconditions.checkArgument(add.isNullable(), "Incompatible change: cannot add required column: %s", leafName(add.fieldNames())); Type type = SparkSchemaUtil.convert(add.dataType()); pendingUpdate.addColumn(parentName(add.fieldNames()), leafName(add.fieldNames()), type, add.comment()); if (add.position() instanceof TableChange.After) { TableChange.After after = (TableChange.After) add.position(); String referenceField = peerName(add.fieldNames(), after.column()); pendingUpdate.moveAfter(DOT.join(add.fieldNames()), referenceField); } else if (add.position() instanceof TableChange.First) { pendingUpdate.moveFirst(DOT.join(add.fieldNames())); } else { Preconditions.checkArgument(add.position() == null, "Cannot add '%s' at unknown 
position: %s", DOT.join(add.fieldNames()), add.position()); } } public static org.apache.iceberg.Table toIcebergTable(Table table) { Preconditions.checkArgument(table instanceof SparkTable, "Table %s is not an Iceberg table", table); SparkTable sparkTable = (SparkTable) table; return sparkTable.table(); } /** * Converts a PartitionSpec to Spark transforms. * * @param spec a PartitionSpec * @return an array of Transforms */ public static Transform[] toTransforms(PartitionSpec spec) { List<Transform> transforms = PartitionSpecVisitor.visit(spec, new PartitionSpecVisitor<Transform>() { @Override public Transform identity(String sourceName, int sourceId) { return Expressions.identity(sourceName); } @Override public Transform bucket(String sourceName, int sourceId, int numBuckets) { return Expressions.bucket(numBuckets, sourceName); } @Override public Transform truncate(String sourceName, int sourceId, int width) { return Expressions.apply("truncate", Expressions.column(sourceName), Expressions.literal(width)); } @Override public Transform year(String sourceName, int sourceId) { return Expressions.years(sourceName); } @Override public Transform month(String sourceName, int sourceId) { return Expressions.months(sourceName); } @Override public Transform day(String sourceName, int sourceId) { return Expressions.days(sourceName); } @Override public Transform hour(String sourceName, int sourceId) { return Expressions.hours(sourceName); } @Override public Transform unknown(int fieldId, String sourceName, int sourceId, String transform) { return Expressions.apply(transform, Expressions.column(sourceName)); } }); return transforms.toArray(new Transform[0]); } public static Distribution buildRequiredDistribution(org.apache.iceberg.Table table) { DistributionMode distributionMode = getDistributionMode(table); switch (distributionMode) { case NONE: return Distributions.unspecified(); case HASH: if (table.spec().isUnpartitioned()) { return Distributions.unspecified(); } else { return Distributions.clustered(toTransforms(table.spec())); } case RANGE: if (table.spec().isUnpartitioned() && table.sortOrder().isUnsorted()) { return Distributions.unspecified(); } else { org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table); return Distributions.ordered(convert(requiredSortOrder)); } default: throw new IllegalArgumentException("Unsupported distribution mode: " + distributionMode); } } public static SortOrder[] buildRequiredOrdering(Distribution distribution, org.apache.iceberg.Table table) { if (distribution instanceof OrderedDistribution) { OrderedDistribution orderedDistribution = (OrderedDistribution) distribution; return orderedDistribution.ordering(); } else { org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table); return convert(requiredSortOrder); } } private static DistributionMode getDistributionMode(org.apache.iceberg.Table table) { boolean isSortedTable = !table.sortOrder().isUnsorted(); String defaultModeName = isSortedTable ? 
WRITE_DISTRIBUTION_MODE_RANGE : WRITE_DISTRIBUTION_MODE_DEFAULT; String modeName = table.properties().getOrDefault(WRITE_DISTRIBUTION_MODE, defaultModeName); return DistributionMode.fromName(modeName); } public static SortOrder[] convert(org.apache.iceberg.SortOrder sortOrder) { List<OrderField> converted = SortOrderVisitor.visit(sortOrder, new SortOrderToSpark()); return converted.toArray(new OrderField[0]); } public static Term toIcebergTerm(Transform transform) { Preconditions.checkArgument(transform.references().length == 1, "Cannot convert transform with more than one column reference: %s", transform); String colName = DOT.join(transform.references()[0].fieldNames()); switch (transform.name()) { case "identity": return org.apache.iceberg.expressions.Expressions.ref(colName); case "bucket": return org.apache.iceberg.expressions.Expressions.bucket(colName, findWidth(transform)); case "years": return org.apache.iceberg.expressions.Expressions.year(colName); case "months": return org.apache.iceberg.expressions.Expressions.month(colName); case "date": case "days": return org.apache.iceberg.expressions.Expressions.day(colName); case "date_hour": case "hours": return org.apache.iceberg.expressions.Expressions.hour(colName); case "truncate": return org.apache.iceberg.expressions.Expressions.truncate(colName, findWidth(transform)); default: throw new UnsupportedOperationException("Transform is not supported: " + transform); } } /** * Converts Spark transforms into a {@link PartitionSpec}. * * @param schema the table schema * @param partitioning Spark Transforms * @return a PartitionSpec */ public static PartitionSpec toPartitionSpec(Schema schema, Transform[] partitioning) { if (partitioning == null || partitioning.length == 0) { return PartitionSpec.unpartitioned(); } PartitionSpec.Builder builder = PartitionSpec.builderFor(schema); for (Transform transform : partitioning) { Preconditions.checkArgument(transform.references().length == 1, "Cannot convert transform with more than one column reference: %s", transform); String colName = DOT.join(transform.references()[0].fieldNames()); switch (transform.name()) { case "identity": builder.identity(colName); break; case "bucket": builder.bucket(colName, findWidth(transform)); break; case "years": builder.year(colName); break; case "months": builder.month(colName); break; case "date": case "days": builder.day(colName); break; case "date_hour": case "hours": builder.hour(colName); break; case "truncate": builder.truncate(colName, findWidth(transform)); break; default: throw new UnsupportedOperationException("Transform is not supported: " + transform); } } return builder.build(); } @SuppressWarnings("unchecked") private static int findWidth(Transform transform) { for (Expression expr : transform.arguments()) { if (expr instanceof Literal) { if (((Literal) expr).dataType() instanceof IntegerType) { Literal<Integer> lit = (Literal<Integer>) expr; Preconditions.checkArgument(lit.value() > 0, "Unsupported width for transform: %s", transform.describe()); return lit.value(); } else if (((Literal) expr).dataType() instanceof LongType) { Literal<Long> lit = (Literal<Long>) expr; Preconditions.checkArgument(lit.value() > 0 && lit.value() < Integer.MAX_VALUE, "Unsupported width for transform: %s", transform.describe()); if (lit.value() > Integer.MAX_VALUE) { throw new IllegalArgumentException(); } return lit.value().intValue(); } } } throw new IllegalArgumentException("Cannot find width for transform: " + transform.describe()); } private static String 
leafName(String[] fieldNames) { Preconditions.checkArgument(fieldNames.length > 0, "Invalid field name: at least one name is required"); return fieldNames[fieldNames.length - 1]; } private static String peerName(String[] fieldNames, String fieldName) { if (fieldNames.length > 1) { String[] peerNames = Arrays.copyOf(fieldNames, fieldNames.length); peerNames[fieldNames.length - 1] = fieldName; return DOT.join(peerNames); } return fieldName; } private static String parentName(String[] fieldNames) { if (fieldNames.length > 1) { return DOT.join(Arrays.copyOfRange(fieldNames, 0, fieldNames.length - 1)); } return null; } public static String describe(org.apache.iceberg.expressions.Expression expr) { return ExpressionVisitors.visit(expr, DescribeExpressionVisitor.INSTANCE); } public static String describe(Schema schema) { return TypeUtil.visit(schema, DescribeSchemaVisitor.INSTANCE); } public static String describe(Type type) { return TypeUtil.visit(type, DescribeSchemaVisitor.INSTANCE); } public static boolean isLocalityEnabled(FileIO io, String location, CaseInsensitiveStringMap readOptions) { InputFile in = io.newInputFile(location); if (in instanceof HadoopInputFile) { String scheme = ((HadoopInputFile) in).getFileSystem().getScheme(); return readOptions.getBoolean("locality", LOCALITY_WHITELIST_FS.contains(scheme)); } return false; } public static boolean isVectorizationEnabled(Map<String, String> properties, CaseInsensitiveStringMap readOptions) { String batchReadsSessionConf = SparkSession.active().conf() .get("spark.sql.iceberg.vectorization.enabled", null); if (batchReadsSessionConf != null) { return Boolean.valueOf(batchReadsSessionConf); } return readOptions.getBoolean(SparkReadOptions.VECTORIZATION_ENABLED, PropertyUtil.propertyAsBoolean(properties, TableProperties.PARQUET_VECTORIZATION_ENABLED, TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT)); } public static int batchSize(Map<String, String> properties, CaseInsensitiveStringMap readOptions) { return readOptions.getInt(SparkReadOptions.VECTORIZATION_BATCH_SIZE, PropertyUtil.propertyAsInt(properties, TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT)); } public static Long propertyAsLong(CaseInsensitiveStringMap options, String property, Long defaultValue) { if (defaultValue != null) { return options.getLong(property, defaultValue); } String value = options.get(property); if (value != null) { return Long.parseLong(value); } return null; } public static Integer propertyAsInt(CaseInsensitiveStringMap options, String property, Integer defaultValue) { if (defaultValue != null) { return options.getInt(property, defaultValue); } String value = options.get(property); if (value != null) { return Integer.parseInt(value); } return null; } public static class DescribeSchemaVisitor extends TypeUtil.SchemaVisitor<String> { private static final Joiner COMMA = Joiner.on(','); private static final DescribeSchemaVisitor INSTANCE = new DescribeSchemaVisitor(); private DescribeSchemaVisitor() { } @Override public String schema(Schema schema, String structResult) { return structResult; } @Override public String struct(Types.StructType struct, List<String> fieldResults) { return "struct<" + COMMA.join(fieldResults) + ">"; } @Override public String field(Types.NestedField field, String fieldResult) { return field.name() + ": " + fieldResult + (field.isRequired() ? 
" not null" : ""); } @Override public String list(Types.ListType list, String elementResult) { return "map<" + elementResult + ">"; } @Override public String map(Types.MapType map, String keyResult, String valueResult) { return "map<" + keyResult + ", " + valueResult + ">"; } @Override public String primitive(Type.PrimitiveType primitive) { switch (primitive.typeId()) { case BOOLEAN: return "boolean"; case INTEGER: return "int"; case LONG: return "bigint"; case FLOAT: return "float"; case DOUBLE: return "double"; case DATE: return "date"; case TIME: return "time"; case TIMESTAMP: return "timestamp"; case STRING: case UUID: return "string"; case FIXED: case BINARY: return "binary"; case DECIMAL: Types.DecimalType decimal = (Types.DecimalType) primitive; return "decimal(" + decimal.precision() + "," + decimal.scale() + ")"; } throw new UnsupportedOperationException("Cannot convert type to SQL: " + primitive); } } private static class DescribeExpressionVisitor extends ExpressionVisitors.ExpressionVisitor<String> { private static final DescribeExpressionVisitor INSTANCE = new DescribeExpressionVisitor(); private DescribeExpressionVisitor() { } @Override public String alwaysTrue() { return "true"; } @Override public String alwaysFalse() { return "false"; } @Override public String not(String result) { return "NOT (" + result + ")"; } @Override public String and(String leftResult, String rightResult) { return "(" + leftResult + " AND " + rightResult + ")"; } @Override public String or(String leftResult, String rightResult) { return "(" + leftResult + " OR " + rightResult + ")"; } @Override public <T> String predicate(BoundPredicate<T> pred) { throw new UnsupportedOperationException("Cannot convert bound predicates to SQL"); } @Override public <T> String predicate(UnboundPredicate<T> pred) { switch (pred.op()) { case IS_NULL: return pred.ref().name() + " IS NULL"; case NOT_NULL: return pred.ref().name() + " IS NOT NULL"; case IS_NAN: return "is_nan(" + pred.ref().name() + ")"; case NOT_NAN: return "not_nan(" + pred.ref().name() + ")"; case LT: return pred.ref().name() + " < " + sqlString(pred.literal()); case LT_EQ: return pred.ref().name() + " <= " + sqlString(pred.literal()); case GT: return pred.ref().name() + " > " + sqlString(pred.literal()); case GT_EQ: return pred.ref().name() + " >= " + sqlString(pred.literal()); case EQ: return pred.ref().name() + " = " + sqlString(pred.literal()); case NOT_EQ: return pred.ref().name() + " != " + sqlString(pred.literal()); case STARTS_WITH: return pred.ref().name() + " LIKE '" + pred.literal() + "%'"; case IN: return pred.ref().name() + " IN (" + sqlString(pred.literals()) + ")"; case NOT_IN: return pred.ref().name() + " NOT IN (" + sqlString(pred.literals()) + ")"; default: throw new UnsupportedOperationException("Cannot convert predicate to SQL: " + pred); } } private static <T> String sqlString(List<org.apache.iceberg.expressions.Literal<T>> literals) { return literals.stream().map(DescribeExpressionVisitor::sqlString).collect(Collectors.joining(", ")); } private static String sqlString(org.apache.iceberg.expressions.Literal<?> lit) { if (lit.value() instanceof String) { return "'" + lit.value() + "'"; } else if (lit.value() instanceof ByteBuffer) { throw new IllegalArgumentException("Cannot convert bytes to SQL literal: " + lit); } else { return lit.value().toString(); } } } /** * Returns a Metadata Table Dataset if it can be loaded from a Spark V2 Catalog * * Because Spark does not allow more than 1 piece in the namespace for a Session Catalog table, 
we circumvent * the entire resolution path for tables and instead look up the table directly ourselves. This lets us correctly * get metadata tables for the SessionCatalog, if we didn't have to work around this we could just use spark.table. * * @param spark SparkSession used for looking up catalog references and tables * @param name The multipart identifier of the base Iceberg table * @param type The type of metadata table to load * @return null if we cannot find the Metadata Table, a Dataset of rows otherwise */ private static Dataset<Row> loadCatalogMetadataTable(SparkSession spark, String name, MetadataTableType type) { try { CatalogAndIdentifier catalogAndIdentifier = catalogAndIdentifier(spark, name); if (catalogAndIdentifier.catalog instanceof BaseCatalog) { BaseCatalog catalog = (BaseCatalog) catalogAndIdentifier.catalog; Identifier baseId = catalogAndIdentifier.identifier; Identifier metaId = Identifier.of(ArrayUtil.add(baseId.namespace(), baseId.name()), type.name()); Table metaTable = catalog.loadTable(metaId); return Dataset.ofRows(spark, DataSourceV2Relation.create(metaTable, Some.apply(catalog), Some.apply(metaId))); } } catch (NoSuchTableException | ParseException e) { // Could not find table return null; } // Could not find table return null; } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name) throws ParseException { return catalogAndIdentifier(spark, name, spark.sessionState().catalogManager().currentCatalog()); } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name, CatalogPlugin defaultCatalog) throws ParseException { ParserInterface parser = spark.sessionState().sqlParser(); Seq<String> multiPartIdentifier = parser.parseMultipartIdentifier(name); List<String> javaMultiPartIdentifier = JavaConverters.seqAsJavaList(multiPartIdentifier); return catalogAndIdentifier(spark, javaMultiPartIdentifier, defaultCatalog); } public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark, String name) { return catalogAndIdentifier(description, spark, name, spark.sessionState().catalogManager().currentCatalog()); } public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark, String name, CatalogPlugin defaultCatalog) { try { return catalogAndIdentifier(spark, name, defaultCatalog); } catch (ParseException e) { throw new IllegalArgumentException("Cannot parse " + description + ": " + name, e); } } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts) { return catalogAndIdentifier(spark, nameParts, spark.sessionState().catalogManager().currentCatalog()); } /** * A modified version of Spark's LookupCatalog.CatalogAndIdentifier.unapply * Attempts to find the catalog and identifier a multipart identifier represents * @param spark Spark session to use for resolution * @param nameParts Multipart identifier representing a table * @param defaultCatalog Catalog to use if none is specified * @return The CatalogPlugin and Identifier for the table */ public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts, CatalogPlugin defaultCatalog) { CatalogManager catalogManager = spark.sessionState().catalogManager(); String[] currentNamespace; if (defaultCatalog.equals(catalogManager.currentCatalog())) { currentNamespace = catalogManager.currentNamespace(); } else { currentNamespace = defaultCatalog.defaultNamespace(); } Pair<CatalogPlugin, Identifier> catalogIdentifier = 
SparkUtil.catalogAndIdentifier(nameParts, catalogName -> { try { return catalogManager.catalog(catalogName); } catch (Exception e) { return null; } }, Identifier::of, defaultCatalog, currentNamespace ); return new CatalogAndIdentifier(catalogIdentifier); } /** * This mimics a class inside of Spark which is private inside of LookupCatalog. */ public static class CatalogAndIdentifier { private final CatalogPlugin catalog; private final Identifier identifier; public CatalogAndIdentifier(CatalogPlugin catalog, Identifier identifier) { this.catalog = catalog; this.identifier = identifier; } public CatalogAndIdentifier(Pair<CatalogPlugin, Identifier> identifier) { this.catalog = identifier.first(); this.identifier = identifier.second(); } public CatalogPlugin catalog() { return catalog; } public Identifier identifier() { return identifier; } } public static TableIdentifier identifierToTableIdentifier(Identifier identifier) { return TableIdentifier.of(Namespace.of(identifier.namespace()), identifier.name()); } }
1
32,819
Minor: If this is going to be public, we should fix the name. `get` doesn't add any value. How about `distributionModeFor` instead?
apache-iceberg
java
@@ -16,7 +16,10 @@ from mitmproxy.tools.console import eventlog class StackWidget(urwid.Frame): - def __init__(self, widget, title, focus): + def __init__(self, window, widget, title, focus): + self.f = focus + self.window = window + if title: header = urwid.AttrWrap( urwid.Text(title),
1
import re import urwid from mitmproxy.tools.console import common from mitmproxy.tools.console import signals from mitmproxy.tools.console import statusbar from mitmproxy.tools.console import flowlist from mitmproxy.tools.console import flowview from mitmproxy.tools.console import commands from mitmproxy.tools.console import keybindings from mitmproxy.tools.console import options from mitmproxy.tools.console import overlay from mitmproxy.tools.console import help from mitmproxy.tools.console import grideditor from mitmproxy.tools.console import eventlog class StackWidget(urwid.Frame): def __init__(self, widget, title, focus): if title: header = urwid.AttrWrap( urwid.Text(title), "heading" if focus else "heading_inactive" ) else: header = None super().__init__( widget, header=header ) def keypress(self, size, key): # Make sure that we don't propagate cursor events outside of the widget. # Otherwise, in a horizontal layout, urwid's Pile would change the focused widget # if we cannot scroll any further. ret = super().keypress(size, key) command = self._command_map[ret] # awkward as they don't implement a full dict api if command and command.startswith("cursor"): return None return ret class WindowStack: def __init__(self, master, base): self.master = master self.windows = dict( flowlist = flowlist.FlowListBox(master), flowview = flowview.FlowView(master), commands = commands.Commands(master), keybindings = keybindings.KeyBindings(master), options = options.Options(master), help = help.HelpView(master), eventlog = eventlog.EventLog(master), edit_focus_query = grideditor.QueryEditor(master), edit_focus_cookies = grideditor.CookieEditor(master), edit_focus_setcookies = grideditor.SetCookieEditor(master), edit_focus_form = grideditor.RequestFormEditor(master), edit_focus_path = grideditor.PathEditor(master), edit_focus_request_headers = grideditor.RequestHeaderEditor(master), edit_focus_response_headers = grideditor.ResponseHeaderEditor(master), ) self.stack = [base] self.overlay = None def set_overlay(self, o, **kwargs): self.overlay = overlay.SimpleOverlay( self, o, self.top_widget(), o.width, **kwargs, ) def top_window(self): """ The current top window, ignoring overlays. """ return self.windows[self.stack[-1]] def top_widget(self): """ The current top widget - either a window or the active overlay. """ if self.overlay: return self.overlay return self.top_window() def push(self, wname): if self.stack[-1] == wname: return prev = self.top_window() self.stack.append(wname) self.call("layout_pushed", prev) def pop(self, *args, **kwargs): """ Pop off the stack, return True if we're already at the top. """ if not self.overlay and len(self.stack) == 1: return True self.call("layout_popping") if self.overlay: self.overlay = None else: self.stack.pop() def call(self, name, *args, **kwargs): """ Call a function on both the top window, and the overlay if there is one. If the widget has a key_responder, we call the function on the responder instead. 
""" getattr(self.top_window(), name)(*args, **kwargs) if self.overlay: getattr(self.overlay, name)(*args, **kwargs) class Window(urwid.Frame): def __init__(self, master): self.statusbar = statusbar.StatusBar(master) super().__init__( None, header = None, footer = urwid.AttrWrap(self.statusbar, "background") ) self.master = master self.master.view.sig_view_refresh.connect(self.view_changed) self.master.view.sig_view_add.connect(self.view_changed) self.master.view.sig_view_remove.connect(self.view_changed) self.master.view.sig_view_update.connect(self.view_changed) self.master.view.focus.sig_change.connect(self.view_changed) self.master.view.focus.sig_change.connect(self.focus_changed) signals.focus.connect(self.sig_focus) signals.flow_change.connect(self.flow_changed) signals.pop_view_state.connect(self.pop) signals.push_view_state.connect(self.push) self.master.options.subscribe(self.configure, ["console_layout"]) self.master.options.subscribe(self.configure, ["console_layout_headers"]) self.pane = 0 self.stacks = [ WindowStack(master, "flowlist"), WindowStack(master, "eventlog") ] def focus_stack(self): return self.stacks[self.pane] def configure(self, otions, updated): self.refresh() def refresh(self): """ Redraw the layout. """ c = self.master.options.console_layout if c == "single": self.pane = 0 def wrapped(idx): widget = self.stacks[idx].top_widget() if self.master.options.console_layout_headers: title = self.stacks[idx].top_window().title else: title = None return StackWidget( widget, title, self.pane == idx ) w = None if c == "single": w = wrapped(0) elif c == "vertical": w = urwid.Pile( [ wrapped(i) for i, s in enumerate(self.stacks) ], focus_item=self.pane ) else: w = urwid.Columns( [wrapped(i) for i, s in enumerate(self.stacks)], dividechars=1, focus_column=self.pane ) self.body = urwid.AttrWrap(w, "background") def flow_changed(self, sender, flow): if self.master.view.focus.flow: if flow.id == self.master.view.focus.flow.id: self.focus_changed() def focus_changed(self, *args, **kwargs): """ Triggered when the focus changes - either when it's modified, or when it changes to a different flow altogether. """ for i in self.stacks: i.call("focus_changed") def view_changed(self, *args, **kwargs): """ Triggered when the view list has changed. """ for i in self.stacks: i.call("view_changed") def set_overlay(self, o, **kwargs): """ Set an overlay on the currently focused stack. """ self.focus_stack().set_overlay(o, **kwargs) self.refresh() def push(self, wname): """ Push a window onto the currently focused stack. """ self.focus_stack().push(wname) self.refresh() self.view_changed() self.focus_changed() def pop(self, *args, **kwargs): """ Pop a window from the currently focused stack. If there is only one window on the stack, this prompts for exit. """ if self.focus_stack().pop(): self.master.prompt_for_exit() else: self.refresh() self.view_changed() self.focus_changed() def current(self, keyctx): """ Returns the active widget, but only the current focus or overlay has a matching key context. """ t = self.focus_stack().top_widget() if t.keyctx == keyctx: return t def current_window(self, keyctx): """ Returns the active window, ignoring overlays. """ t = self.focus_stack().top_window() if t.keyctx == keyctx: return t def any(self, keyctx): """ Returns the top window of either stack if they match the context. 
""" for t in [x.top_window() for x in self.stacks]: if t.keyctx == keyctx: return t def sig_focus(self, sender, section): self.focus_position = section def switch(self): """ Switch between the two panes. """ if self.master.options.console_layout == "single": self.pane = 0 else: self.pane = (self.pane + 1) % len(self.stacks) self.refresh() def mouse_event(self, *args, **kwargs): # args: (size, event, button, col, row) k = super().mouse_event(*args, **kwargs) if not k: if args[1] == "mouse drag": signals.status_message.send( message = "Hold down fn, shift, alt or ctrl to select text or use the --no-mouse parameter.", expire = 1 ) elif args[1] == "mouse press" and args[2] == 4: self.keypress(args[0], "up") elif args[1] == "mouse press" and args[2] == 5: self.keypress(args[0], "down") else: return False return True def keypress(self, size, k): k = super().keypress(size, k) if k: return self.master.keymap.handle( self.focus_stack().top_widget().keyctx, k ) class Screen(urwid.raw_display.Screen): def write(self, data): if common.IS_WSL: # replace urwid's SI/SO, which produce artifacts under WSL. # at some point we may figure out what they actually do. data = re.sub("[\x0e\x0f]", "", data) super().write(data)
1
13,700
This needs a more self-explanatory name. Maybe `.is_focused`?
mitmproxy-mitmproxy
py
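The review message for this record suggests renaming a focus flag. Below is a minimal, hypothetical Python sketch of what that rename could look like; everything except the call-site shape visible in the file above (`StackWidget(widget, title, self.pane == idx)`) is an assumption for illustration, not mitmproxy's actual implementation.

# Hypothetical sketch of the reviewer's suggestion: keep the pane-focus flag
# under a self-explanatory attribute (.is_focused) instead of an anonymous
# positional boolean. Class body is illustrative only.
class StackWidget:
    def __init__(self, window, title, is_focused):
        self.window = window
        self.title = title
        self.is_focused = is_focused  # self-explanatory, per the review

    def title_attr(self):
        # e.g. highlight the header of the pane that currently has focus
        return "heading" if self.is_focused else "heading_inactive"

# Mirrors the call-site shape shown in the file above:
#   StackWidget(widget, title, self.pane == idx)
w = StackWidget(window="flowlist", title="Flows", is_focused=True)
print(w.title_attr())  # heading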
@@ -174,11 +174,17 @@ var rubyAzureMappings = { 'parameter_grouping':['../../dev/TestServer/swagger/azure-parameter-grouping.json', 'ParameterGroupingModule'] }; +var goAzureMappings = { + 'paging':['../../dev/TestServer/swagger/paging.json','paginggroup'], + 'azurereport':['../../dev/TestServer/swagger/azure-report.json', 'azurereport'] +}; + gulp.task('regenerate:expected', function(cb){ runSequence('regenerate:delete', [ - 'regenerate:expected:csazure', 'regenerate:expected:cs', + 'regenerate:expected:csazure', + 'regenerate:expected:csazurefluent', 'regenerate:expected:node', 'regenerate:expected:nodeazure', 'regenerate:expected:ruby',
1
/// <binding Clean='clean' /> var gulp = require('gulp'), msbuild = require('gulp-msbuild'), debug = require('gulp-debug'), env = require('gulp-env'), path = require('path'), fs = require('fs'), merge = require('merge2'), shell = require('gulp-shell'), glob = require('glob'), spawn = require('child_process').spawn, assemblyInfo = require('gulp-dotnet-assembly-info'), nuspecSync = require('./Tools/gulp/gulp-nuspec-sync'), runtimeVersionSync = require('./Tools/gulp/gulp-runtime-version-sync'), nugetProjSync = require('./Tools/gulp/gulp-nuget-proj-sync'), regenExpected = require('./Tools/gulp/gulp-regenerate-expected'), del = require('del'), gutil = require('gulp-util'), runSequence = require('run-sequence'), requireDir = require('require-dir')('./Tools/gulp'), exec = require('child_process').exec; const DEFAULT_ASSEMBLY_VERSION = '0.9.0.0'; const MAX_BUFFER = 1024 * 4096; var isWindows = (process.platform.lastIndexOf('win') === 0); var isLinux= (process.platform.lastIndexOf('linux') === 0); var isMac = (process.platform.lastIndexOf('darwin') === 0); process.env.MSBUILDDISABLENODEREUSE = 1; function GetAutoRestFolder() { if (isWindows) { return "src/core/AutoRest/bin/Debug/net451/win7-x64/"; } if( isMac ) { return "src/core/AutoRest/bin/Debug/net451/osx.10.11-x64/"; } if( isLinux ) { return "src/core/AutoRest/bin/Debug/net451/ubuntu.14.04-x64/" } throw new Error("Unknown platform?"); } function basePathOrThrow() { if (!gutil.env.basePath) { return __dirname; } return gutil.env.basePath; } function mergeOptions(obj1,obj2){ var obj3 = {}; for (var attrname in obj1) { obj3[attrname] = obj1[attrname]; } for (var attrname in obj2) { obj3[attrname] = obj2[attrname]; } return obj3; } var defaultMappings = { 'AcceptanceTests/ParameterFlattening': '../../dev/TestServer/swagger/parameter-flattening.json', 'AcceptanceTests/BodyArray': '../../dev/TestServer/swagger/body-array.json', 'AcceptanceTests/BodyBoolean': '../../dev/TestServer/swagger/body-boolean.json', 'AcceptanceTests/BodyByte': '../../dev/TestServer/swagger/body-byte.json', 'AcceptanceTests/BodyComplex': '../../dev/TestServer/swagger/body-complex.json', 'AcceptanceTests/BodyDate': '../../dev/TestServer/swagger/body-date.json', 'AcceptanceTests/BodyDateTime': '../../dev/TestServer/swagger/body-datetime.json', 'AcceptanceTests/BodyDateTimeRfc1123': '../../dev/TestServer/swagger/body-datetime-rfc1123.json', 'AcceptanceTests/BodyDuration': '../../dev/TestServer/swagger/body-duration.json', 'AcceptanceTests/BodyDictionary': '../../dev/TestServer/swagger/body-dictionary.json', 'AcceptanceTests/BodyFile': '../../dev/TestServer/swagger/body-file.json', 'AcceptanceTests/BodyFormData': '../../dev/TestServer/swagger/body-formdata.json', 'AcceptanceTests/BodyInteger': '../../dev/TestServer/swagger/body-integer.json', 'AcceptanceTests/BodyNumber': '../../dev/TestServer/swagger/body-number.json', 'AcceptanceTests/BodyString': '../../dev/TestServer/swagger/body-string.json', 'AcceptanceTests/Header': '../../dev/TestServer/swagger/header.json', 'AcceptanceTests/Http': '../../dev/TestServer/swagger/httpInfrastructure.json', 'AcceptanceTests/Report': '../../dev/TestServer/swagger/report.json', 'AcceptanceTests/RequiredOptional': '../../dev/TestServer/swagger/required-optional.json', 'AcceptanceTests/Url': '../../dev/TestServer/swagger/url.json', 'AcceptanceTests/Validation': '../../dev/TestServer/swagger/validation.json', 'AcceptanceTests/CustomBaseUri': '../../dev/TestServer/swagger/custom-baseUrl.json', 'AcceptanceTests/CustomBaseUriMoreOptions': 
'../../dev/TestServer/swagger/custom-baseUrl-more-options.json', 'AcceptanceTests/ModelFlattening': '../../dev/TestServer/swagger/model-flattening.json' }; var rubyMappings = { 'boolean':['../../dev/TestServer/swagger/body-boolean.json', 'BooleanModule'], 'integer':['../../dev/TestServer/swagger/body-integer.json','IntegerModule'], 'number':['../../dev/TestServer/swagger/body-number.json','NumberModule'], 'string':['../../dev/TestServer/swagger/body-string.json','StringModule'], 'byte':['../../dev/TestServer/swagger/body-byte.json','ByteModule'], 'array':['../../dev/TestServer/swagger/body-array.json','ArrayModule'], 'dictionary':['../../dev/TestServer/swagger/body-dictionary.json','DictionaryModule'], 'date':['../../dev/TestServer/swagger/body-date.json','DateModule'], 'datetime':['../../dev/TestServer/swagger/body-datetime.json','DatetimeModule'], 'datetime_rfc1123':['../../dev/TestServer/swagger/body-datetime-rfc1123.json','DatetimeRfc1123Module'], 'duration':['../../dev/TestServer/swagger/body-duration.json','DurationModule'], 'complex':['../../dev/TestServer/swagger/body-complex.json','ComplexModule'], 'url':['../../dev/TestServer/swagger/url.json','UrlModule'], 'url_items':['../../dev/TestServer/swagger/url.json','UrlModule'], 'url_query':['../../dev/TestServer/swagger/url.json','UrlModule'], 'header_folder':['../../dev/TestServer/swagger/header.json','HeaderModule'], 'http_infrastructure':['../../dev/TestServer/swagger/httpInfrastructure.json','HttpInfrastructureModule'], 'required_optional':['../../dev/TestServer/swagger/required-optional.json','RequiredOptionalModule'], 'report':['../../dev/TestServer/swagger/report.json','ReportModule'], 'model_flattening':['../../dev/TestServer/swagger/model-flattening.json', 'ModelFlatteningModule'], 'parameter_flattening':['../../dev/TestServer/swagger/parameter-flattening.json', 'ParameterFlatteningModule'], 'validation':['../../dev/TestServer/swagger/validation.json', 'ValidationModule'], 'custom_base_uri':['../../dev/TestServer/swagger/custom-baseUrl.json', 'CustomBaseUriModule'], 'custom_base_uri_more':['../../dev/TestServer/swagger/custom-baseUrl-more-options.json', 'CustomBaseUriMoreModule'] }; var goMappings = { 'body-array':['../../dev/TestServer/swagger/body-array.json','arraygroup'], 'body-boolean':['../../dev/TestServer/swagger/body-boolean.json', 'booleangroup'], 'body-byte':['../../dev/TestServer/swagger/body-byte.json','bytegroup'], 'body-complex':['../../dev/TestServer/swagger/body-complex.json','complexgroup'], 'body-date':['../../dev/TestServer/swagger/body-date.json','dategroup'], 'body-datetime-rfc1123':['../../dev/TestServer/swagger/body-datetime-rfc1123.json','datetimerfc1123group'], 'body-datetime':['../../dev/TestServer/swagger/body-datetime.json','datetimegroup'], 'body-dictionary':['../../dev/TestServer/swagger/body-dictionary.json','dictionarygroup'], 'body-duration':['../../dev/TestServer/swagger/body-duration.json','durationgroup'], 'body-file':['../../dev/TestServer/swagger/body-file.json', 'filegroup'], 'body-formdata':['../../dev/TestServer/swagger/body-formdata.json', 'formdatagroup'], 'body-integer':['../../dev/TestServer/swagger/body-integer.json','integergroup'], 'body-number':['../../dev/TestServer/swagger/body-number.json','numbergroup'], 'body-string':['../../dev/TestServer/swagger/body-string.json','stringgroup'], 'custom-baseurl':['../../dev/TestServer/swagger/custom-baseUrl.json', 'custombaseurlgroup'], 'header':['../../dev/TestServer/swagger/header.json','headergroup'], 
'httpinfrastructure':['../../dev/TestServer/swagger/httpInfrastructure.json','httpinfrastructuregroup'], 'model-flattening':['../../dev/TestServer/swagger/model-flattening.json', 'modelflatteninggroup'], 'report':['../../dev/TestServer/swagger/report.json','report'], 'required-optional':['../../dev/TestServer/swagger/required-optional.json','optionalgroup'], 'url':['../../dev/TestServer/swagger/url.json','urlgroup'], 'validation':['../../dev/TestServer/swagger/validation.json', 'validationgroup'], }; var defaultAzureMappings = { 'AcceptanceTests/Lro': '../../dev/TestServer/swagger/lro.json', 'AcceptanceTests/Paging': '../../dev/TestServer/swagger/paging.json', 'AcceptanceTests/AzureReport': '../../dev/TestServer/swagger/azure-report.json', 'AcceptanceTests/AzureParameterGrouping': '../../dev/TestServer/swagger/azure-parameter-grouping.json', 'AcceptanceTests/AzureResource': '../../dev/TestServer/swagger/azure-resource.json', 'AcceptanceTests/Head': '../../dev/TestServer/swagger/head.json', 'AcceptanceTests/HeadExceptions': '../../dev/TestServer/swagger/head-exceptions.json', 'AcceptanceTests/SubscriptionIdApiVersion': '../../dev/TestServer/swagger/subscriptionId-apiVersion.json', 'AcceptanceTests/AzureSpecials': '../../dev/TestServer/swagger/azure-special-properties.json', 'AcceptanceTests/CustomBaseUri': '../../dev/TestServer/swagger/custom-baseUrl.json' }; var compositeMappings = { 'AcceptanceTests/CompositeBoolIntClient': '../../dev/TestServer/swagger/composite-swagger.json' }; var azureCompositeMappings = { 'AcceptanceTests/AzureCompositeModelClient': '../../dev/TestServer/swagger/azure-composite-swagger.json' }; var nodeAzureMappings = { 'AcceptanceTests/StorageManagementClient': '../../dev/TestServer/swagger/storage.json' }; var nodeMappings = { 'AcceptanceTests/ComplexModelClient': '../../dev/TestServer/swagger/complex-model.json' }; var rubyAzureMappings = { 'head':['../../dev/TestServer/swagger/head.json', 'HeadModule'], 'head_exceptions':['../../dev/TestServer/swagger/head-exceptions.json', 'HeadExceptionsModule'], 'paging':['../../dev/TestServer/swagger/paging.json', 'PagingModule'], 'azure_resource':['../../dev/TestServer/swagger/azure-resource.json', 'AzureResourceModule'], 'lro':['../../dev/TestServer/swagger/lro.json', 'LroModule'], 'azure_url':['../../dev/TestServer/swagger/subscriptionId-apiVersion.json', 'AzureUrlModule'], 'azure_special_properties': ['../../dev/TestServer/swagger/azure-special-properties.json', 'AzureSpecialPropertiesModule'], 'azure_report':['../../dev/TestServer/swagger/azure-report.json', 'AzureReportModule'], 'parameter_grouping':['../../dev/TestServer/swagger/azure-parameter-grouping.json', 'ParameterGroupingModule'] }; gulp.task('regenerate:expected', function(cb){ runSequence('regenerate:delete', [ 'regenerate:expected:csazure', 'regenerate:expected:cs', 'regenerate:expected:node', 'regenerate:expected:nodeazure', 'regenerate:expected:ruby', 'regenerate:expected:rubyazure', 'regenerate:expected:java', 'regenerate:expected:javaazure', 'regenerate:expected:javaazurefluent', 'regenerate:expected:python', 'regenerate:expected:pythonazure', 'regenerate:expected:samples', 'regenerate:expected:go' ], cb); }); gulp.task('regenerate:delete', function(cb){ del([ 'src/generator/AutoRest.CSharp.Azure.Tests/Expected', 'src/generator/AutoRest.CSharp.Tests/Expected', 'src/generator/AutoRest.NodeJS.Tests/Expected', 'src/generator/AutoRest.NodeJS.Azure.Tests/Expected', 'src/generator/AutoRest.Java.Tests/src/main/java', 
'src/generator/AutoRest.Java.Azure.Tests/src/main/java', 'src/generator/AutoRest.Java.Azure.Fluent.Tests/src/main/java', 'src/generator/AutoRest.Python.Tests/Expected', 'src/generator/AutoRest.Python.Azure.Tests/Expected', 'src/generator/AutoRest.Go.Tests/src/tests/generated' ], cb); }); gulp.task('regenerate:expected:nodecomposite', function (cb) { regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.NodeJS.Tests', 'inputBaseDir': 'src/generator/AutoRest.NodeJS.Tests', 'mappings': compositeMappings, 'modeler': 'CompositeSwagger', 'outputDir': 'Expected', 'codeGenerator': 'NodeJS', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:nodeazurecomposite', function (cb) { regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.NodeJS.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.NodeJS.Azure.Tests', 'mappings': azureCompositeMappings, 'modeler': 'CompositeSwagger', 'outputDir': 'Expected', 'codeGenerator': 'Azure.NodeJS', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:nodeazure', ['regenerate:expected:nodeazurecomposite'], function (cb) { for (var p in defaultAzureMappings) { nodeAzureMappings[p] = defaultAzureMappings[p]; } regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.NodeJS.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': nodeAzureMappings, 'outputDir': 'Expected', 'codeGenerator': 'Azure.NodeJS', 'flatteningThreshold': '1' }, cb); }) gulp.task('regenerate:expected:node', ['regenerate:expected:nodecomposite'], function (cb) { for (var p in defaultMappings) { nodeMappings[p] = defaultMappings[p]; } regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.NodeJS.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': nodeMappings, 'outputDir': 'Expected', 'codeGenerator': 'NodeJS', 'flatteningThreshold': '1' }, cb); }) gulp.task('regenerate:expected:python', function(cb){ regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Python.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': defaultMappings, 'outputDir': 'Expected', 'codeGenerator': 'Python', 'flatteningThreshold': '1' }, cb); }) gulp.task('regenerate:expected:pythonazure', function(cb){ mappings = mergeOptions({ 'AcceptanceTests/AzureBodyDuration': '../../dev/TestServer/swagger/body-duration.json', 'AcceptanceTests/StorageManagementClient': '../../dev/TestServer/swagger/storage.json' }, defaultAzureMappings); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Python.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'Azure.Python', 'flatteningThreshold': '1' }, cb); }) gulp.task('regenerate:expected:rubyazure', function(cb){ regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Ruby.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': rubyAzureMappings, 'outputDir': 'RspecTests/Generated', 'codeGenerator': 'Azure.Ruby', 'nsPrefix': 'MyNamespace' }, cb); }) gulp.task('regenerate:expected:ruby', function(cb){ regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Ruby.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': rubyMappings, 'outputDir': 'RspecTests/Generated', 'codeGenerator': 'Ruby', 'nsPrefix': 'MyNamespace' }, cb); }) gulp.task('regenerate:expected:javaazure', function(cb){ mappings = {}; for (var key in defaultAzureMappings) { mappings[key.substring(16).toLowerCase()] = 
defaultAzureMappings[key]; } regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Java.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'src/main/java/fixtures', 'codeGenerator': 'Azure.Java', 'nsPrefix': 'Fixtures' }, cb); }) gulp.task('regenerate:expected:javaazurefluent', function(cb){ mappings = {}; for (var key in defaultAzureMappings) { mappings[key.substring(16).toLowerCase()] = defaultAzureMappings[key]; } regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Java.Azure.Fluent.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'src/main/java/fixtures', 'codeGenerator': 'Azure.Java.Fluent', 'nsPrefix': 'Fixtures' }, cb); }) gulp.task('regenerate:expected:java', function(cb){ mappings = {}; for (var key in defaultMappings) { mappings[key.substring(16).toLowerCase()] = defaultMappings[key]; } regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Java.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': mappings, 'outputDir': 'src/main/java/fixtures', 'codeGenerator': 'Java', 'nsPrefix': 'Fixtures' }, cb); }) gulp.task('regenerate:expected:csazure', ['regenerate:expected:csazurecomposite','regenerate:expected:csazureallsync', 'regenerate:expected:csazurenosync'], function (cb) { mappings = mergeOptions({ 'AcceptanceTests/AzureBodyDuration': '../../dev/TestServer/swagger/body-duration.json' }, defaultAzureMappings); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'Azure.CSharp', 'nsPrefix': 'Fixtures.Azure', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:cs', ['regenerate:expected:cswithcreds', 'regenerate:expected:cscomposite', 'regenerate:expected:csallsync', 'regenerate:expected:csnosync'], function (cb) { mappings = mergeOptions({ 'Mirror.RecursiveTypes': 'Swagger/swagger-mirror-recursive-type.json', 'Mirror.Primitives': 'Swagger/swagger-mirror-primitives.json', 'Mirror.Sequences': 'Swagger/swagger-mirror-sequences.json', 'Mirror.Polymorphic': 'Swagger/swagger-mirror-polymorphic.json', 'Internal.Ctors': 'Swagger/swagger-internal-ctors.json', 'Additional.Properties': 'Swagger/swagger-additional-properties.yaml', 'DateTimeOffset': 'Swagger/swagger-datetimeoffset.json', 'AcceptanceTests/UrlMultiCollectionFormat' : '../../dev/TestServer/swagger/url-multi-collectionFormat.json' }, defaultMappings); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:cswithcreds', function(cb){ mappings = mergeOptions( { 'PetstoreV2': 'Swagger/swagger.2.0.example.v2.json', }); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1', 'addCredentials': true }, cb); }); gulp.task('regenerate:expected:csallsync', function(cb){ mappings = mergeOptions( { 'PetstoreV2AllSync': 'Swagger/swagger.2.0.example.v2.json', }); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': 
mappings, 'outputDir': 'Expected', 'codeGenerator': 'CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1', 'syncMethods': 'all' }, cb); }); gulp.task('regenerate:expected:csnosync', function(cb){ mappings = mergeOptions( { 'PetstoreV2NoSync': 'Swagger/swagger.2.0.example.v2.json', }); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1', 'syncMethods': 'none' }, cb); }); gulp.task('regenerate:expected:csazureallsync', function(cb){ mappings = mergeOptions( { 'AcceptanceTests/AzureBodyDurationAllSync': '../../dev/TestServer/swagger/body-duration.json' }); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'Azure.CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1', 'syncMethods': 'all' }, cb); }); gulp.task('regenerate:expected:csazurenosync', function(cb){ mappings = mergeOptions( { 'AcceptanceTests/AzureBodyDurationNoSync': '../../dev/TestServer/swagger/body-duration.json' }); regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': mappings, 'outputDir': 'Expected', 'codeGenerator': 'Azure.CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1', 'syncMethods': 'none' }, cb); }); gulp.task('regenerate:expected:cscomposite', function (cb) { regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Tests', 'mappings': compositeMappings, 'modeler' : 'CompositeSwagger', 'outputDir': 'Expected', 'codeGenerator': 'CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:csazurecomposite', function (cb) { regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'inputBaseDir': 'src/generator/AutoRest.CSharp.Azure.Tests', 'mappings': azureCompositeMappings, 'modeler': 'CompositeSwagger', 'outputDir': 'Expected', 'codeGenerator': 'Azure.CSharp', 'nsPrefix': 'Fixtures', 'flatteningThreshold': '1' }, cb); }); gulp.task('regenerate:expected:go', function(cb){ regenExpected({ 'outputBaseDir': 'src/generator/AutoRest.Go.Tests', 'inputBaseDir': 'src/generator/AutoRest.Go.Tests', 'mappings': goMappings, 'outputDir': 'src/tests/generated', 'codeGenerator': 'Go' }, cb); process.env.GOPATH = __dirname + '/src/generator/AutoRest.Go.Tests'; }) gulp.task('regenerate:expected:samples', ['regenerate:expected:samples:azure'], function(){ var autorestConfigPath = path.join(basePathOrThrow(), GetAutoRestFolder() + 'AutoRest.json'); var content = fs.readFileSync(autorestConfigPath).toString(); if (content.charCodeAt(0) === 0xFEFF) { content = content.slice(1); } var autorestConfig = JSON.parse(content); for (var lang in autorestConfig.codeGenerators) { if (!lang.match(/^Azure\..+/)) { var generateCmd = path.join(basePathOrThrow(), GetAutoRestFolder() + 'AutoRest.exe') + ' -Modeler Swagger -CodeGenerator ' + lang + ' -OutputDirectory ' + path.join(basePathOrThrow(), 'Samples/petstore/' + lang) + ' -Namespace Petstore -Input ' + path.join(basePathOrThrow(), 'Samples/petstore/petstore.json') + ' -Header NONE'; exec(clrCmd(generateCmd), function(err, stdout, stderr) { console.log(stdout); console.error(stderr); }); } } }); 
gulp.task('regenerate:expected:samples:azure', function(){ var autorestConfigPath = path.join(basePathOrThrow(), GetAutoRestFolder() + 'AutoRest.json'); var content = fs.readFileSync(autorestConfigPath).toString(); if (content.charCodeAt(0) === 0xFEFF) { content = content.slice(1); } var autorestConfig = JSON.parse(content); for (var lang in autorestConfig.codeGenerators) { if (lang.match(/^Azure\..+/)) { var generateCmd = path.join(basePathOrThrow(), GetAutoRestFolder() + 'AutoRest.exe') + ' -Modeler Swagger -CodeGenerator ' + lang + ' -OutputDirectory ' + path.join(basePathOrThrow(), 'Samples/azure-storage/' + lang) + ' -Namespace Petstore -Input ' + path.join(basePathOrThrow(), 'Samples/azure-storage/azure-storage.json') + ' -Header NONE'; exec(clrCmd(generateCmd), function(err, stdout, stderr) { console.log(stdout); console.error(stderr); }); } } }); var msBuildToolsVersion = 12.0; if (isWindows) { fs.readdirSync('C:/Program Files (x86)/MSBuild/').forEach(function (item) { var itemAsFloat = parseFloat(item); if (itemAsFloat > msBuildToolsVersion && itemAsFloat < 15) { msBuildToolsVersion = itemAsFloat; } }); } var msbuildDefaults = { stdout: process.stdout, stderr: process.stderr, maxBuffer: MAX_BUFFER, verbosity: 'normal', errorOnFail: true, toolsVersion: msBuildToolsVersion }; gulp.task('clean:node_modules', function(cb) { del(['./src/**/node_modules', './src/client/**/node_modules'], cb) }) gulp.task('clean:build', ['clean:node_modules'], function (cb) { return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['clean'] }))); }); gulp.task('clean:templates', function(cb) { del([ './src/**/Templates/*.cs', ], cb); }); gulp.task('clean:generatedTest', function(cb) { var basePath = './PackageTest/NugetPackageTest'; del([ path.join(basePath, 'Generated/**/*'), path.join(basePath, 'packages/**/*'), ], cb); }); gulp.task('clean', ['clean:build', 'clean:templates', 'clean:generatedTest']); gulp.task('syncDependencies:nugetProj', function() { var dirs = glob.sync(path.join(basePathOrThrow(), '/**/project.json')) .map(function(filePath) { return path.dirname(filePath); }); return gulp.src(dirs.map(function (dir) { return path.join(dir, '/**/AssemblyInfo.cs'); }), { base: './' }) .pipe(nugetProjSync({ default_version: DEFAULT_ASSEMBLY_VERSION })) .pipe(gulp.dest('.')); }) gulp.task('syncDependencies:nuspec', function() { var dirs = glob.sync(path.join(basePathOrThrow(), '/**/packages.config')) .map(function(filePath) { return path.dirname(filePath); }); return gulp.src(dirs.map(function (dir) { return path.join(dir, '/**/*.nuspec'); }), { base: './' }) .pipe(nuspecSync()) .pipe(gulp.dest('.')); }); gulp.task('syncDependencies:runtime', ['syncDependencies:runtime:cs', 'syncDependencies:runtime:csazure', 'syncDependencies:runtime:node', 'syncDependencies:runtime:nodeazure', 'syncDependencies:runtime:ruby', 'syncDependencies:runtime:rubyazure']); gulp.task('syncDependencies', ['syncDependencies:nugetProj', 'syncDependencies:nuspec', 'syncDependencies:runtime']); gulp.task('build', function(cb) { // warning 0219 is for unused variables, which causes the build to fail on xbuild return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['build'], properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' }, stdout: true, errorOnFail: true }))); }); gulp.task('build:release', function(cb) { // warning 0219 is for unused variables, which causes the build to fail on xbuild return 
gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults,{ targets: ['build'], properties: { WarningsNotAsErrors: 0219, Configuration: 'Release' } }))); }); /* gulp.task('package', function(cb) { return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['package'], verbosity: 'normal', }))); }); */ gulp.task('test:clientruntime:node', shell.task('npm test', { cwd: './src/client/NodeJS/ms-rest/', verbosity: 3 })); gulp.task('test:clientruntime:nodeazure', shell.task('npm test', { cwd: './src/client/NodeJS/ms-rest-azure/', verbosity: 3 })); gulp.task('test:clientruntime:ruby', ['syncDependencies:runtime:ruby'], shell.task('bundle exec rspec', { cwd: './src/client/Ruby/ms-rest/', verbosity: 3 })); gulp.task('test:clientruntime:rubyazure', ['syncDependencies:runtime:rubyazure'], shell.task('bundle exec rspec', { cwd: './src/client/Ruby/ms-rest-azure/', verbosity: 3 })); gulp.task('test:clientruntime:java', shell.task(basePathOrThrow() + '/gradlew :client-runtime:check', { cwd: './', verbosity: 3 })); gulp.task('test:clientruntime:javaazure', shell.task(basePathOrThrow() + '/gradlew :azure-client-runtime:check', { cwd: './', verbosity: 3 })); gulp.task('test:clientruntime:python', shell.task('tox', { cwd: './src/client/Python/msrest/', verbosity: 3 })); gulp.task('test:clientruntime:pythonazure', shell.task('tox', { cwd: './src/client/Python/msrestazure/', verbose:true })); gulp.task('test:clientruntime:javaauthjdk', shell.task(basePathOrThrow() + '/gradlew :azure-client-authentication:check', { cwd: './', verbosity: 3 })); gulp.task('test:clientruntime:javaauthandroid', shell.task(basePathOrThrow() + '/gradlew :azure-android-client-authentication:check', { cwd: './', verbosity: 3 })); gulp.task('test:clientruntime', function (cb) { runSequence('test:clientruntime:node', 'test:clientruntime:nodeazure', 'test:clientruntime:ruby', 'test:clientruntime:rubyazure', 'test:clientruntime:python', 'test:clientruntime:pythonazure', 'test:clientruntime:java', 'test:clientruntime:javaazure', 'test:clientruntime:javaauthjdk', 'test:clientruntime:javaauthandroid', cb); }); gulp.task('test:node', shell.task('npm test', {cwd: './src/generator/AutoRest.NodeJS.Tests/', verbosity: 3})); gulp.task('test:node:azure', shell.task('npm test', {cwd: './src/generator/AutoRest.NodeJS.Azure.Tests/', verbosity: 3})); gulp.task('test:ruby', ['regenerate:expected:ruby'], shell.task('ruby RspecTests/tests_runner.rb', { cwd: './src/generator/AutoRest.Ruby.Tests', verbosity: 3 })); gulp.task('test:ruby:azure', ['regenerate:expected:rubyazure'], shell.task('ruby RspecTests/tests_runner.rb', { cwd: './src/generator/AutoRest.Ruby.Azure.Tests', verbosity: 3 })); gulp.task('test:java', shell.task(basePathOrThrow() + '/gradlew :codegen-tests:check', {cwd: './', verbosity: 3})); gulp.task('test:java:azure', shell.task(basePathOrThrow() + '/gradlew :azure-codegen-tests:check', {cwd: './', verbosity: 3})); gulp.task('test:python', shell.task('tox', {cwd: './src/generator/AutoRest.Python.Tests/', verbosity: 3})); gulp.task('test:python:azure', shell.task('tox', {cwd: './src/generator/AutoRest.Python.Azure.Tests/', verbosity: 3})); gulp.task('test:go', ['regenerate:expected:go'], shell.task([ 'glide up', 'go fmt ./generated/...', 'go run ./runner.go' ], {cwd: './src/generator/AutoRest.Go.Tests/src/tests', verbosity: 3}) ); var xunitTestsDlls = [ ]; var xunitNetCoreXproj = [ 'src/client/Microsoft.Rest.ClientRuntime.Azure.Tests/project.json', 'src/client/Microsoft.Rest.ClientRuntime.Tests/project.json', 
'src/core/AutoRest.Core.Tests/project.json', 'src/core/AutoRest.Extensions.Azure.Tests/project.json', 'src/core/AutoRest.Extensions.Tests/project.json', 'src/generator/AutoRest.AzureResourceSchema.Tests/project.json', 'src/generator/AutoRest.CSharp.Azure.Tests/project.json', 'src/generator/AutoRest.CSharp.Tests/project.json', 'src/generator/AutoRest.CSharp.Unit.Tests/project.json', 'src/modeler/AutoRest.CompositeSwagger.Tests/project.json', 'src/modeler/AutoRest.Swagger.Tests/project.json' ]; var defaultShellOptions = { verbosity: 3, env: { AUTOREST_TEST_SERVER_PATH: path.resolve('./src/dev/TestServer') } }; var clrCmd = function(cmd){ return isWindows ? cmd : ('mono ' + cmd); }; var execClrCmd = function(cmd, options){ gutil.log(cmd); return shell(clrCmd(cmd), options); }; var clrTask = function(cmd, options){ return shell.task(clrCmd(cmd), options); }; var xunit = function(template, options){ var xunitRunner = path.resolve('packages/xunit.runner.console.2.1.0/tools/xunit.console.exe'); return execClrCmd(xunitRunner + ' ' + template, options); } var xunitnetcore = function(options){ options.templateData = { f: function (s) { return path.basename(path.dirname(s)) } }; var printStatusCodeCmd = 'echo Status code: %errorlevel%'; if (!isWindows) { printStatusCodeCmd = 'echo Status code: $?'; } var netcoreScript = 'dotnet test "<%= file.path %>" -verbose -xml "' + path.join(basePathOrThrow(), '/TestResults/') + '<%= f(file.path) %>.xml" && ' + printStatusCodeCmd; return shell(netcoreScript, options); } gulp.task('test:xunit', ['test:xunit:netcore'], function () { return gulp.src(xunitTestsDlls).pipe(xunit('<%= file.path %> -noshadow -noappdomain -diagnostics', defaultShellOptions)); }); gulp.task('test:xunit:netcore', ['regenerate:expected:cs', 'regenerate:expected:csazure'], function () { return gulp.src(xunitNetCoreXproj) .pipe(debug()) .pipe(xunitnetcore(defaultShellOptions)); }); var nugetPath = path.resolve('Tools/NuGet.exe'); var nugetTestProjDir = path.resolve('PackageTest/NugetPackageTest'); var packagesDir = path.resolve('binaries/packages'); var cachedClientRuntimePackages = path.join(process.env.HOME || (process.env.HOMEDRIVE + process.env.HOMEPATH), 'AppData', 'Local', 'NuGet', 'Cache', "Microsoft.Rest.ClientRuntime.*.nupkg"); gulp.task('test:nugetPackages:restore', ['test:nugetPackages:clean'], clrTask(nugetPath + ' restore ' + path.join(nugetTestProjDir, '/NugetPackageTest.sln') + ' -source "' + path.resolve(packagesDir) + ';https://www.nuget.org/api/v2/"')); gulp.task('test:nugetPackages:clean', function () { //turn on 'force' so we can remove files outside of repo folder. 
return del([path.join(nugetTestProjDir, 'Generated'), cachedClientRuntimePackages], {'force' : true}); }); var autoRestExe = function(){ gutil.log(glob.sync(path.join(basePathOrThrow(), 'PackageTest/NugetPackageTest/packages/autorest.*/tools/AutoRest.exe'))); return glob.sync(path.join(basePathOrThrow(), 'PackageTest/NugetPackageTest/packages/autorest.*/tools/AutoRest.exe'))[0]; } gulp.task('test:nugetPackages:generate:csharp', ['test:nugetPackages:restore', 'test:nugetPackages:clean'], function(){ var csharp = autoRestExe() + ' -Modeler Swagger -CodeGenerator CSharp -OutputDirectory ' + path.join(nugetTestProjDir, '/Generated/CSharp') + ' -Namespace Fixtures.Bodynumber -Input <%= file.path %> -Header NONE'; return gulp.src('src/dev/TestServer/swagger/body-number.json').pipe(execClrCmd(csharp, {verbosity: 3})); }); gulp.task('test:nugetPackages:generate:node', ['test:nugetPackages:restore', 'test:nugetPackages:clean'], function(){ var nodejs = autoRestExe() + ' -Modeler Swagger -CodeGenerator NodeJS -OutputDirectory ' + path.join(nugetTestProjDir, '/Generated/NodeJS') + ' -Input <%= file.path %> -Header NONE'; return gulp.src('src/dev/TestServer/swagger/body-number.json').pipe(execClrCmd(nodejs, {verbosity: 3})); }); gulp.task('test:nugetPackages:generate', ['test:nugetPackages:generate:csharp', 'test:nugetPackages:generate:node']); gulp.task('test:nugetPackages:build', ['test:nugetPackages:generate'], function(){ return gulp.src(path.join(nugetTestProjDir, 'NugetPackageCSharpTest.csproj')) .pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['build'], properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' } }))); }); gulp.task('test:nugetPackages:xunit', ['test:nugetPackages:build'], function(){ var xunitSrc = gulp.src(path.join(nugetTestProjDir, 'bin/Debug/NuGetPackageCSharpTest.dll')); return xunitSrc.pipe(xunit('<%= file.path %> -noshadow -noappdomain', defaultShellOptions)) }); gulp.task('test:nugetPackages:npm', ['test:nugetPackages:generate'], shell.task('npm test', {cwd: nugetTestProjDir, verbosity: 3})) gulp.task('test', function(cb){ if (isWindows) { runSequence( 'test:xunit', 'test:clientruntime', // DISABLE 'test:nugetPackages:xunit', 'test:node', 'test:node:azure', // DISABLE 'test:nugetPackages:npm', 'test:ruby', 'test:ruby:azure', 'test:java', 'test:java:azure', 'test:python', 'test:python:azure', 'test:go', cb); } else { runSequence( // 'test:xunit', // 'test:clientruntime', 'test:node', 'test:node:azure', 'test:ruby', 'test:ruby:azure', 'test:java', 'test:java:azure', 'test:python', 'test:python:azure', 'test:go', cb); } }); /* gulp.task('analysis', function(cb) { return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['codeanalysis'], properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' }, }))); }); */ gulp.task('default', function(cb){ // Notes: // Analysis runs rebuild under the covers, so this causes build to be run in DEBUG // The build RELEASE causes release bits to be built, so we can package RELEASE dlls // Test then runs in DEBUG, but uses the packages created in package if (isWindows) { //'analysis', runSequence('clean', 'build:release', 'build', 'test', cb); } else { runSequence('clean', 'build', 'test', cb); } });
1
23,099
minor: I'd add a space between `,` & `'paginggroup'`.
Azure-autorest
java
@@ -42,10 +42,8 @@ def main(): push_p = subparsers.add_parser("push") push_p.add_argument("package", type=str, help="Owner/Package Name") - push_p.set_defaults(func=command.push) - - push_p = subparsers.add_parser("push") - push_p.add_argument("package", type=str, help="Owner/Package Name") + push_p.add_argument("--reupload", action="store_true", + help="Re-upload all fragments, even the ones the server already has") push_p.set_defaults(func=command.push) version_p = subparsers.add_parser("version")
1
""" Parses the command-line arguments and runs a command. """ from __future__ import print_function import argparse import sys import requests from . import command def main(): """ Build and run parser """ parser = argparse.ArgumentParser(description="Quilt Command Line") subparsers = parser.add_subparsers(title="Commands", dest='cmd') subparsers.required = True login_p = subparsers.add_parser("login") login_p.set_defaults(func=command.login) logout_p = subparsers.add_parser("logout") logout_p.set_defaults(func=command.logout) log_p = subparsers.add_parser("log") log_p.add_argument("package", type=str, help="Owner/Package Name") log_p.set_defaults(func=command.log) generate_p = subparsers.add_parser("generate") generate_p.add_argument("directory", help="Source file directory") generate_p.set_defaults(func=command.generate) build_p = subparsers.add_parser("build") build_p.add_argument("package", type=str, help="Owner/Package Name") buildpath_group = build_p.add_mutually_exclusive_group(required=True) buildpath_group.add_argument("path", type=str, nargs='?', help="Path to source files (directory) or the Yaml build file") build_p.set_defaults(func=command.build_from_path) push_p = subparsers.add_parser("push") push_p.add_argument("package", type=str, help="Owner/Package Name") push_p.set_defaults(func=command.push) push_p = subparsers.add_parser("push") push_p.add_argument("package", type=str, help="Owner/Package Name") push_p.set_defaults(func=command.push) version_p = subparsers.add_parser("version") version_subparsers = version_p.add_subparsers(title="version", dest='cmd') version_subparsers.required = True version_list_p = version_subparsers.add_parser("list") version_list_p.add_argument("package", type=str, help="Owner/Package Name") version_list_p.set_defaults(func=command.version_list) version_add_p = version_subparsers.add_parser("add") version_add_p.add_argument("package", type=str, help="Owner/Package Name") version_add_p.add_argument("version", type=str, help="Version") version_add_p.add_argument("pkghash", type=str, help="Package hash") version_add_p.set_defaults(func=command.version_add) tag_p = subparsers.add_parser("tag") tag_subparsers = tag_p.add_subparsers(title="Tag", dest='cmd') tag_subparsers.required = True tag_list_p = tag_subparsers.add_parser("list") tag_list_p.add_argument("package", type=str, help="Owner/Package Name") tag_list_p.set_defaults(func=command.tag_list) tag_add_p = tag_subparsers.add_parser("add") tag_add_p.add_argument("package", type=str, help="Owner/Package Name") tag_add_p.add_argument("tag", type=str, help="Tag name") tag_add_p.add_argument("pkghash", type=str, help="Package hash") tag_add_p.set_defaults(func=command.tag_add) tag_remove_p = tag_subparsers.add_parser("remove") tag_remove_p.add_argument("package", type=str, help="Owner/Package Name") tag_remove_p.add_argument("tag", type=str, help="Tag name") tag_remove_p.set_defaults(func=command.tag_remove) install_p = subparsers.add_parser("install") install_p.add_argument("package", type=str, help="Owner/Package Name") install_p.set_defaults(func=command.install) install_p.add_argument("-f", "--force", action="store_true", help="Overwrite without prompting") install_group = install_p.add_mutually_exclusive_group() install_group.add_argument("-x", "--hash", type=str, help="Package hash") install_group.add_argument("-v", "--version", type=str, help="Package version") install_group.add_argument("-t", "--tag", type=str, help="Package tag - defaults to 'latest'") access_p = 
subparsers.add_parser("access") access_subparsers = access_p.add_subparsers(title="Access", dest='cmd') access_subparsers.required = True access_list_p = access_subparsers.add_parser("list") access_list_p.add_argument("package", type=str, help="Owner/Package Name") access_list_p.set_defaults(func=command.access_list) access_add_p = access_subparsers.add_parser("add") access_add_p.add_argument("package", type=str, help="Owner/Package Name") access_add_p.add_argument("user", type=str, help="User to add") access_add_p.set_defaults(func=command.access_add) access_remove_p = access_subparsers.add_parser("remove") access_remove_p.add_argument("package", type=str, help="Owner/Package Name") access_remove_p.add_argument("user", type=str, help="User to remove") access_remove_p.set_defaults(func=command.access_remove) ls_p = subparsers.add_parser("ls") ls_p.set_defaults(func=command.ls) inspect_p = subparsers.add_parser("inspect") inspect_p.add_argument("package", type=str, help="Owner/Package Name") inspect_p.set_defaults(func=command.inspect) args = parser.parse_args() # Convert argparse.Namespace into dict and clean it up. # We can then pass it directly to the helper function. kwargs = vars(args) del kwargs['cmd'] func = kwargs.pop('func') try: func(**kwargs) return 0 except command.CommandException as ex: print(ex, file=sys.stderr) return 1 except requests.exceptions.ConnectionError as ex: print("Failed to connect: %s" % ex, file=sys.stderr) return 1
1
15,054
"Re-upload all fragments (even if fragment is already in registry)"
quiltdata-quilt
py
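A runnable sketch of this record's suggestion: the `--reupload` flag from the patch above, combined with the help wording proposed in the review message. Only the "push" subcommand is shown; any names not present in the patch are illustrative.

import argparse

# Minimal sketch of the patched "push" subcommand with the reviewer's wording.
parser = argparse.ArgumentParser(description="Quilt Command Line")
subparsers = parser.add_subparsers(title="Commands", dest="cmd")
push_p = subparsers.add_parser("push")
push_p.add_argument("package", type=str, help="Owner/Package Name")
push_p.add_argument("--reupload", action="store_true",
                    help="Re-upload all fragments (even if fragment is already in registry)")

args = parser.parse_args(["push", "owner/pkg", "--reupload"])
print(args.reupload)  # True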
@@ -813,11 +813,15 @@ type PrometheusRuleSpec struct { // upstream Prometheus struct definitions don't have json struct tags. // RuleGroup is a list of sequentially evaluated recording and alerting rules. +// Note: PartialResponseStrategy is only used by ThanosRuler and will +// be ignored by Prometheus instances. Valid values for this field are 'warn' +// or 'abort'. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response // +k8s:openapi-gen=true type RuleGroup struct { - Name string `json:"name"` - Interval string `json:"interval,omitempty"` - Rules []Rule `json:"rules"` + Name string `json:"name"` + Interval string `json:"interval,omitempty"` + Rules []Rule `json:"rules"` + PartialResponseStrategy string `json:"partial_response_strategy,omitempty"` } // Rule describes an alerting or recording rule.
1
// Copyright 2018 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) const ( Version = "v1" PrometheusesKind = "Prometheus" PrometheusName = "prometheuses" PrometheusKindKey = "prometheus" AlertmanagersKind = "Alertmanager" AlertmanagerName = "alertmanagers" AlertManagerKindKey = "alertmanager" ServiceMonitorsKind = "ServiceMonitor" ServiceMonitorName = "servicemonitors" ServiceMonitorKindKey = "servicemonitor" PodMonitorsKind = "PodMonitor" PodMonitorName = "podmonitors" PodMonitorKindKey = "podmonitor" PrometheusRuleKind = "PrometheusRule" PrometheusRuleName = "prometheusrules" PrometheusRuleKindKey = "prometheusrule" ) // Prometheus defines a Prometheus deployment. // +genclient // +k8s:openapi-gen=true type Prometheus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec PrometheusSpec `json:"spec"` // Most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Status *PrometheusStatus `json:"status,omitempty"` } // PrometheusList is a list of Prometheuses. // +k8s:openapi-gen=true type PrometheusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Prometheuses Items []*Prometheus `json:"items"` } // PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type PrometheusSpec struct { // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata // Metadata Labels and Annotations gets propagated to the prometheus pods. PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` // ServiceMonitors to be selected for target discovery. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` // Namespaces to be selected for ServiceMonitor discovery. If nil, only // check own namespace. ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` // *Experimental* PodMonitors to be selected for target discovery. 
PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` // Namespaces to be selected for PodMonitor discovery. If nil, only // check own namespace. PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` // Version of Prometheus to be deployed. Version string `json:"version,omitempty"` // Tag of Prometheus container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. Tag string `json:"tag,omitempty"` // SHA of Prometheus container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. SHA string `json:"sha,omitempty"` // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Prometheus is being // configured. Image *string `json:"image,omitempty"` // Base image to use for a Prometheus deployment. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Number of instances to deploy for a Prometheus deployment. Replicas *int32 `json:"replicas,omitempty"` // Name of Prometheus external label used to denote replica name. // Defaults to the value of `prometheus_replica`. External label will // _not_ be added when value is set to empty string (`""`). ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` // Name of Prometheus external label used to denote Prometheus instance // name. Defaults to the value of `prometheus`. External label will // _not_ be added when value is set to empty string (`""`). PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` // Time duration Prometheus shall retain data for. Default is '24h', // and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). Retention string `json:"retention,omitempty"` // Maximum amount of disk space used by blocks. RetentionSize string `json:"retentionSize,omitempty"` // Disable prometheus compaction. DisableCompaction bool `json:"disableCompaction,omitempty"` // Enable compression of the write-ahead log using Snappy. This flag is // only available in versions of Prometheus >= 2.11.0. WALCompression *bool `json:"walCompression,omitempty"` // Log level for Prometheus to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for Prometheus to be configured with. LogFormat string `json:"logFormat,omitempty"` // Interval between consecutive scrapes. ScrapeInterval string `json:"scrapeInterval,omitempty"` // Interval between consecutive evaluations. EvaluationInterval string `json:"evaluationInterval,omitempty"` // /--rules.*/ command-line arguments. Rules Rules `json:"rules,omitempty"` // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). 
ExternalLabels map[string]string `json:"externalLabels,omitempty"` // Enable access to prometheus web admin API. Defaults to the value of `false`. // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, // shutdown Prometheus, and more. Enabling this should be done with care and the // user is advised to add additional authentication authorization via a proxy to // ensure only clients authorized to perform these actions can do so. // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` // The external URL the Prometheus instances will be available under. This is // necessary to generate correct URLs. This is necessary if Prometheus is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Prometheus registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // QuerySpec defines the query command line flags when starting Prometheus. Query *QuerySpec `json:"query,omitempty"` // Storage spec to specify how storage shall be used. Storage *StorageSpec `json:"storage,omitempty"` // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will // be appended to other volumes that are generated as a result of StorageSpec objects. Volumes []v1.Volume `json:"volumes,omitempty"` // A selector to select which PrometheusRules to mount for loading alerting // rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus // Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom // resources selected by RuleSelector. Make sure it does not match any config // maps that you do not want to be migrated. RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` // Namespaces to be selected for PrometheusRules discovery. If unspecified, only // the same namespace as the Prometheus object is in is used. RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` // Define details regarding alerting. Alerting *AlertingSpec `json:"alerting,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // Secrets is a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // The Secrets are mounted into /etc/prometheus/secrets/<secret-name>. Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>. ConfigMaps []string `json:"configMaps,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. 
Tolerations []v1.Toleration `json:"tolerations,omitempty"` // If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` // If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` // ListenLocal makes the Prometheus server listen on loopback, so that it // does not bind against the Pod IP. ListenLocal bool `json:"listenLocal,omitempty"` // Containers allows injecting additional containers or modifying operator generated // containers. This can be used to allow adding an authentication proxy to a Prometheus pod or // to change the behavior of an operator generated container. Containers described here modify // an operator generated container if they share the same name and modifications are done via a // strategic merge patch. The current container names are: `prometheus`, // `prometheus-config-reloader`, `rules-configmap-reloader`, and `thanos-sidecar`. Overriding // containers is entirely outside the scope of what the maintainers will support and by doing // so, you accept that this behaviour may break at any time without notice. Containers []v1.Container `json:"containers,omitempty"` // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. // fetch secrets for injection into the Prometheus configuration from external sources. Any errors // during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // Using initContainers for any use case other then secret fetching is entirely outside the scope // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. InitContainers []v1.Container `json:"initContainers,omitempty"` // AdditionalScrapeConfigs allows specifying a key of a Secret containing // additional Prometheus scrape configurations. Scrape configurations // specified are appended to the configurations generated by the Prometheus // Operator. Job configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. // As scrape configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible scrape configs are going to break // Prometheus after the upgrade. AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` // AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing // additional Prometheus alert relabel configurations. Alert relabel configurations // specified are appended to the configurations generated by the Prometheus // Operator. Alert relabel configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. 
// As alert relabel configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible alert relabel configs are going to break // Prometheus after the upgrade. AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` // AdditionalAlertManagerConfigs allows specifying a key of a Secret containing // additional Prometheus AlertManager configurations. AlertManager configurations // specified are appended to the configurations generated by the Prometheus // Operator. Job configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. // As AlertManager configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible AlertManager configs are going to break // Prometheus after the upgrade. AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` // APIServerConfig allows specifying a host and auth methods to access apiserver. // If left empty, Prometheus is assumed to run inside of the cluster // and will discover API servers automatically and use the pod's CA certificate // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` // Thanos configuration allows configuring various aspects of a Prometheus // server in a Thanos environment. // // This section is experimental, it may change significantly without // deprecation notice in any release. // // This is experimental and may change significantly without backward // compatibility in any release. Thanos *ThanosSpec `json:"thanos,omitempty"` // Priority class assigned to the Pods PriorityClassName string `json:"priorityClassName,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` // ArbitraryFSAccessThroughSMs configures whether configuration // based on a service monitor can access arbitrary files on the file system // of the Prometheus container e.g. bearer token files. ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"` // OverrideHonorLabels if set to true overrides all user configured honor_labels. // If HonorLabels is set in ServiceMonitor or PodMonitor to true, this overrides honor_labels to false. OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` // OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` // IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from // the podmonitor and servicemonitor configs, and they will only discover endpoints // within their current namespace. Defaults to false. IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` // EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert // and metric that is user created. The label value will always be the namespace of the object that is // being created. 
EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` } // ArbitraryFSAccessThroughSMsConfig enables users to configure, whether // a service monitor selected by the Prometheus instance is allowed to use // arbitrary files on the file system of the Prometheus container. This is the case // when e.g. a service monitor specifies a BearerTokenFile in an endpoint. A // malicious user could create a service monitor selecting arbitrary secret files // in the Prometheus container. Those secrets would then be sent with a scrape // request by Prometheus to a malicious target. Denying the above would prevent the // attack, users can instead use the BearerTokenSecret field. type ArbitraryFSAccessThroughSMsConfig struct { Deny bool `json:"deny,omitempty"` } // PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type PrometheusStatus struct { // Represents whether any actions on the underlaying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Prometheus deployment // (their labels match the selector). Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Prometheus deployment // that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Prometheus deployment. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Prometheus deployment. UnavailableReplicas int32 `json:"unavailableReplicas"` } // AlertingSpec defines parameters for alerting configuration of Prometheus servers. // +k8s:openapi-gen=true type AlertingSpec struct { // AlertmanagerEndpoints Prometheus should fire alerts against. Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` } // StorageSpec defines the configured storage for a group Prometheus servers. // If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used. // +k8s:openapi-gen=true type StorageSpec struct { // EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` // A PVC spec to be used by the Prometheus StatefulSets. VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` } // QuerySpec defines the query command line flags when starting Prometheus. // +k8s:openapi-gen=true type QuerySpec struct { // The delta difference allowed for retrieving metrics during expression evaluations. LookbackDelta *string `json:"lookbackDelta,omitempty"` // Number of concurrent queries that can be run at once. MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` // Maximum number of samples a single query can load into memory. Note that queries will fail if they would load more samples than this into memory, so this also limits the number of samples a query can return. 
MaxSamples *int32 `json:"maxSamples,omitempty"` // Maximum time a query may take before being aborted. Timeout *string `json:"timeout,omitempty"` } // ThanosSpec defines parameters for a Prometheus server within a Thanos deployment. // +k8s:openapi-gen=true type ThanosSpec struct { // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Thanos is being // configured. Image *string `json:"image,omitempty"` // Version describes the version of Thanos to use. Version *string `json:"version,omitempty"` // Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. Tag *string `json:"tag,omitempty"` // SHA of Thanos container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. SHA *string `json:"sha,omitempty"` // Thanos base image if other than default. BaseImage *string `json:"baseImage,omitempty"` // Resources defines the resource requirements for the Thanos sidecar. // If not provided, no requests/limits will be set Resources v1.ResourceRequirements `json:"resources,omitempty"` // ObjectStorageConfig configures object storage in Thanos. ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` // ListenLocal makes the Thanos sidecar listen on loopback, so that it // does not bind against the Pod IP. ListenLocal bool `json:"listenLocal,omitempty"` } // RemoteWriteSpec defines the remote_write configuration for prometheus. // +k8s:openapi-gen=true type RemoteWriteSpec struct { //The URL of the endpoint to send samples to. URL string `json:"url"` //Timeout for requests to the remote write endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` //The list of remote write relabel configurations. WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` //BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File to read bearer token for remote write. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote write. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote write. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` //Optional ProxyURL ProxyURL string `json:"proxyUrl,omitempty"` // QueueConfig allows tuning of the remote write queue parameters. QueueConfig *QueueConfig `json:"queueConfig,omitempty"` } // QueueConfig allows the tuning of remote_write queue_config parameters. This object // is referenced in the RemoteWriteSpec object. // +k8s:openapi-gen=true type QueueConfig struct { // Capacity is the number of samples to buffer per shard before we start dropping them. Capacity int `json:"capacity,omitempty"` // MinShards is the minimum number of shards, i.e. amount of concurrency. MinShards int `json:"minShards,omitempty"` // MaxShards is the maximum number of shards, i.e. amount of concurrency. MaxShards int `json:"maxShards,omitempty"` // MaxSamplesPerSend is the maximum number of samples per send. MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` // BatchSendDeadline is the maximum time a sample will wait in buffer. BatchSendDeadline string `json:"batchSendDeadline,omitempty"` // MaxRetries is the maximum number of times to retry a batch on recoverable errors. 
MaxRetries int `json:"maxRetries,omitempty"` // MinBackoff is the initial retry delay. Gets doubled for every retry. MinBackoff string `json:"minBackoff,omitempty"` // MaxBackoff is the maximum retry delay. MaxBackoff string `json:"maxBackoff,omitempty"` } // RemoteReadSpec defines the remote_read configuration for prometheus. // +k8s:openapi-gen=true type RemoteReadSpec struct { //The URL of the endpoint to send samples to. URL string `json:"url"` //An optional list of equality matchers which have to be present // in a selector to query the remote read endpoint. RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` //Timeout for requests to the remote read endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` //Whether reads should be made for queries for time ranges that // the local storage should have complete data for. ReadRecent bool `json:"readRecent,omitempty"` //BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // bearer token for remote read. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote read. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote read. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` //Optional ProxyURL ProxyURL string `json:"proxyUrl,omitempty"` } // RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. // It defines `<metric_relabel_configs>`-section of Prometheus configuration. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs // +k8s:openapi-gen=true type RelabelConfig struct { //The source labels select values from existing labels. Their content is concatenated //using the configured separator and matched against the configured regular expression //for the replace, keep, and drop actions. SourceLabels []string `json:"sourceLabels,omitempty"` //Separator placed between concatenated source label values. default is ';'. Separator string `json:"separator,omitempty"` //Label to which the resulting value is written in a replace action. //It is mandatory for replace actions. Regex capture groups are available. TargetLabel string `json:"targetLabel,omitempty"` //Regular expression against which the extracted value is matched. Default is '(.*)' Regex string `json:"regex,omitempty"` // Modulus to take of the hash of the source label values. Modulus uint64 `json:"modulus,omitempty"` //Replacement value against which a regex replace is performed if the //regular expression matches. Regex capture groups are available. Default is '$1' Replacement string `json:"replacement,omitempty"` // Action to perform based on regex matching. Default is 'replace' Action string `json:"action,omitempty"` } // APIServerConfig defines a host and auth methods to access apiserver. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config // +k8s:openapi-gen=true type APIServerConfig struct { // Host of apiserver. // A valid string consisting of a hostname or IP followed by an optional port number Host string `json:"host"` // BasicAuth allow an endpoint to authenticate over basic authentication BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // Bearer token for accessing apiserver. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for accessing apiserver. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for accessing apiserver. 
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` } // AlertmanagerEndpoints defines a selection of a single Endpoints object // containing alertmanager IPs to fire alerts against. // +k8s:openapi-gen=true type AlertmanagerEndpoints struct { // Namespace of Endpoints object. Namespace string `json:"namespace"` // Name of Endpoints object in Namespace. Name string `json:"name"` // Port the Alertmanager API is exposed on. Port intstr.IntOrString `json:"port"` // Scheme to use when firing alerts. Scheme string `json:"scheme,omitempty"` // Prefix for the HTTP path alerts are pushed to. PathPrefix string `json:"pathPrefix,omitempty"` // TLS Config to use for alertmanager connection. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // BearerTokenFile to read from filesystem to use when authenticating to // Alertmanager. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Version of the Alertmanager API that Prometheus uses to send alerts. It // can be "v1" or "v2". APIVersion string `json:"apiVersion,omitempty"` } // ServiceMonitor defines monitoring for a set of services. // +genclient // +k8s:openapi-gen=true type ServiceMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Service selection for target discovery by // Prometheus. Spec ServiceMonitorSpec `json:"spec"` } // ServiceMonitorSpec contains specification parameters for a ServiceMonitor. // +k8s:openapi-gen=true type ServiceMonitorSpec struct { // The label to use to retrieve the job name from. JobLabel string `json:"jobLabel,omitempty"` // TargetLabels transfers labels on the Kubernetes Service onto the target. TargetLabels []string `json:"targetLabels,omitempty"` // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. PodTargetLabels []string `json:"podTargetLabels,omitempty"` // A list of endpoints allowed as part of this ServiceMonitor. Endpoints []Endpoint `json:"endpoints"` // Selector to select Endpoints objects. Selector metav1.LabelSelector `json:"selector"` // Selector to select which namespaces the Endpoints objects are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. SampleLimit uint64 `json:"sampleLimit,omitempty"` } // Endpoint defines a scrapeable endpoint serving Prometheus metrics. // +k8s:openapi-gen=true type Endpoint struct { // Name of the service port this endpoint refers to. Mutually exclusive with targetPort. Port string `json:"port,omitempty"` // Name or number of the target port of the endpoint. Mutually exclusive with port. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` // HTTP path to scrape for metrics. Path string `json:"path,omitempty"` // HTTP scheme to use for scraping. Scheme string `json:"scheme,omitempty"` // Optional HTTP URL parameters Params map[string][]string `json:"params,omitempty"` // Interval at which metrics should be scraped Interval string `json:"interval,omitempty"` // Timeout after which the scrape is ended ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // File to read bearer token for scraping targets. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the service monitor and accessible by // the Prometheus Operator. 
BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. HonorLabels bool `json:"honorLabels,omitempty"` // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` // RelabelConfigs to apply to samples before scraping. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. ProxyURL *string `json:"proxyUrl,omitempty"` } // PodMonitor defines monitoring for a set of pods. // +genclient // +k8s:openapi-gen=true type PodMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Pod selection for target discovery by Prometheus. Spec PodMonitorSpec `json:"spec"` } // PodMonitorSpec contains specification parameters for a PodMonitor. // +k8s:openapi-gen=true type PodMonitorSpec struct { // The label to use to retrieve the job name from. JobLabel string `json:"jobLabel,omitempty"` // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. PodTargetLabels []string `json:"podTargetLabels,omitempty"` // A list of endpoints allowed as part of this PodMonitor. PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` // Selector to select Pod objects. Selector metav1.LabelSelector `json:"selector"` // Selector to select which namespaces the Endpoints objects are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. SampleLimit uint64 `json:"sampleLimit,omitempty"` } // PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. // +k8s:openapi-gen=true type PodMetricsEndpoint struct { // Name of the port this endpoint refers to. Mutually exclusive with targetPort. Port string `json:"port,omitempty"` // Name or number of the target port of the endpoint. Mutually exclusive with port. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` // HTTP path to scrape for metrics. Path string `json:"path,omitempty"` // HTTP scheme to use for scraping. Scheme string `json:"scheme,omitempty"` // Optional HTTP URL parameters Params map[string][]string `json:"params,omitempty"` // Interval at which metrics should be scraped Interval string `json:"interval,omitempty"` // Timeout after which the scrape is ended ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. HonorLabels bool `json:"honorLabels,omitempty"` // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` // RelabelConfigs to apply to samples before ingestion. 
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. ProxyURL *string `json:"proxyUrl,omitempty"` } // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints // +k8s:openapi-gen=true type BasicAuth struct { // The secret in the service monitor namespace that contains the username // for authentication. Username v1.SecretKeySelector `json:"username,omitempty"` // The secret in the service monitor namespace that contains the password // for authentication. Password v1.SecretKeySelector `json:"password,omitempty"` } // SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive. type SecretOrConfigMap struct { // Secret containing data to use for the targets. Secret *v1.SecretKeySelector `json:"secret,omitempty"` // ConfigMap containing data to use for the targets. ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"` } // SecretOrConfigMapValidationError is returned by SecretOrConfigMap.Validate() // on semantically invalid configurations. // +k8s:openapi-gen=false type SecretOrConfigMapValidationError struct { err string } func (e *SecretOrConfigMapValidationError) Error() string { return e.err } // Validate semantically validates the given SecretOrConfigMap. func (c *SecretOrConfigMap) Validate() error { if c.Secret != nil && c.ConfigMap != nil { return &SecretOrConfigMapValidationError{"SecretOrConfigMap can not specify both Secret and ConfigMap"} } return nil } // TLSConfig specifies TLS configuration parameters. // +k8s:openapi-gen=true type TLSConfig struct { // Path to the CA cert in the Prometheus container to use for the targets. CAFile string `json:"caFile,omitempty"` // Struct containing the CA cert to use for the targets. CA SecretOrConfigMap `json:"ca,omitempty"` // Path to the client cert file in the Prometheus container for the targets. CertFile string `json:"certFile,omitempty"` // Struct containing the client cert file for the targets. Cert SecretOrConfigMap `json:"cert,omitempty"` // Path to the client key file in the Prometheus container for the targets. KeyFile string `json:"keyFile,omitempty"` // Secret containing the client key file for the targets. KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` // Used to verify the hostname for the targets. ServerName string `json:"serverName,omitempty"` // Disable target certificate validation. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` } // TLSConfigValidationError is returned by TLSConfig.Validate() on semantically // invalid tls configurations. // +k8s:openapi-gen=false type TLSConfigValidationError struct { err string } func (e *TLSConfigValidationError) Error() string { return e.err } // Validate semantically validates the given TLSConfig.
func (c *TLSConfig) Validate() error { if c.CA != (SecretOrConfigMap{}) { if c.CAFile != "" { return &TLSConfigValidationError{"tls config can not both specify CAFile and CA"} } if err := c.CA.Validate(); err != nil { return err } } if c.Cert != (SecretOrConfigMap{}) { if c.CertFile != "" { return &TLSConfigValidationError{"tls config can not both specify CertFile and Cert"} } if err := c.Cert.Validate(); err != nil { return err } } if c.KeyFile != "" && c.KeySecret != nil { return &TLSConfigValidationError{"tls config can not both specify KeyFile and KeySecret"} } return nil } // ServiceMonitorList is a list of ServiceMonitors. // +k8s:openapi-gen=true type ServiceMonitorList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of ServiceMonitors Items []*ServiceMonitor `json:"items"` } // PodMonitorList is a list of PodMonitors. // +k8s:openapi-gen=true type PodMonitorList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of PodMonitors Items []*PodMonitor `json:"items"` } // PrometheusRuleList is a list of PrometheusRules. // +k8s:openapi-gen=true type PrometheusRuleList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Rules Items []*PrometheusRule `json:"items"` } // PrometheusRule defines alerting rules for a Prometheus instance // +genclient // +k8s:openapi-gen=true type PrometheusRule struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired alerting rule definitions for Prometheus. Spec PrometheusRuleSpec `json:"spec"` } // PrometheusRuleSpec contains specification parameters for a Rule. // +k8s:openapi-gen=true type PrometheusRuleSpec struct { // Content of Prometheus rule file Groups []RuleGroup `json:"groups,omitempty"` } // RuleGroup and Rule are copied instead of vendored because the // upstream Prometheus struct definitions don't have json struct tags. // RuleGroup is a list of sequentially evaluated recording and alerting rules. // +k8s:openapi-gen=true type RuleGroup struct { Name string `json:"name"` Interval string `json:"interval,omitempty"` Rules []Rule `json:"rules"` } // Rule describes an alerting or recording rule. // +k8s:openapi-gen=true type Rule struct { Record string `json:"record,omitempty"` Alert string `json:"alert,omitempty"` Expr intstr.IntOrString `json:"expr"` For string `json:"for,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` } // Alertmanager describes an Alertmanager cluster. // +genclient // +k8s:openapi-gen=true type Alertmanager struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec AlertmanagerSpec `json:"spec"` // Most recent observed status of the Alertmanager cluster. Read-only. 
Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Status *AlertmanagerStatus `json:"status,omitempty"` } // AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerSpec struct { // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata // Metadata Labels and Annotations gets propagated to the prometheus pods. PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Alertmanager is being // configured. Image *string `json:"image,omitempty"` // Version the cluster should be on. Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Secrets is a list of Secrets in the same namespace as the Alertmanager // object, which shall be mounted into the Alertmanager Pods. // The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>. Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager // object, which shall be mounted into the Alertmanager Pods. // The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>. ConfigMaps []string `json:"configMaps,omitempty"` // ConfigSecret is the name of a Kubernetes Secret in the same namespace as the // Alertmanager object, which contains configuration for this Alertmanager // instance. Defaults to 'alertmanager-<alertmanager-name>' // The secret is mounted into /etc/alertmanager/config. ConfigSecret string `json:"configSecret,omitempty"` // Log level for Alertmanager to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for Alertmanager to be configured with. LogFormat string `json:"logFormat,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected // size. Replicas *int32 `json:"replicas,omitempty"` // Time duration Alertmanager shall retain data for. Default is '120h', // and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). 
Retention string `json:"retention,omitempty"` // Storage is the definition of how storage will be used by the Alertmanager // instances. Storage *StorageSpec `json:"storage,omitempty"` // Volumes allows configuration of additional volumes on the output StatefulSet definition. // Volumes specified will be appended to other volumes that are generated as a result of // StorageSpec objects. Volumes []v1.Volume `json:"volumes,omitempty"` // VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. // VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, // that are generated as a result of StorageSpec objects. VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` // The external URL the Alertmanager instances will be available under. This is // necessary to generate correct URLs. This is necessary if Alertmanager is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Alertmanager registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // If set to true all actions on the underlaying managed objects are not // goint to be performed, except for delete actions. Paused bool `json:"paused,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. Tolerations []v1.Toleration `json:"tolerations,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // ListenLocal makes the Alertmanager server listen on loopback, so that it // does not bind against the Pod IP. Note this is only for the Alertmanager // UI, not the gossip communication. ListenLocal bool `json:"listenLocal,omitempty"` // Containers allows injecting additional containers. This is meant to // allow adding an authentication proxy to an Alertmanager pod. Containers []v1.Container `json:"containers,omitempty"` // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. // fetch secrets for injection into the Alertmanager configuration from external sources. Any // errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // Using initContainers for any use case other then secret fetching is entirely outside the scope // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. 
InitContainers []v1.Container `json:"initContainers,omitempty"` // Priority class assigned to the Pods PriorityClassName string `json:"priorityClassName,omitempty"` // AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. AdditionalPeers []string `json:"additionalPeers,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` } // AlertmanagerList is a list of Alertmanagers. // +k8s:openapi-gen=true type AlertmanagerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Alertmanagers Items []Alertmanager `json:"items"` } // AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerStatus struct { // Represents whether any actions on the underlaying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Alertmanager // cluster (their labels match the selector). Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Alertmanager // cluster that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Alertmanager cluster. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Alertmanager cluster. UnavailableReplicas int32 `json:"unavailableReplicas"` } // NamespaceSelector is a selector for selecting either all namespaces or a // list of namespaces. // +k8s:openapi-gen=true type NamespaceSelector struct { // Boolean describing whether all namespaces are selected in contrast to a // list restricting them. Any bool `json:"any,omitempty"` // List of namespace names. MatchNames []string `json:"matchNames,omitempty"` // TODO(fabxc): this should embed metav1.LabelSelector eventually. // Currently the selector is only used for namespaces which require more complex // implementation to support label selections. } // /--rules.*/ command-line arguments // +k8s:openapi-gen=true type Rules struct { Alert RulesAlert `json:"alert,omitempty"` } // /--rules.alert.*/ command-line arguments // +k8s:openapi-gen=true type RulesAlert struct { // Max time to tolerate prometheus outage for restoring 'for' state of alert. ForOutageTolerance string `json:"forOutageTolerance,omitempty"` // Minimum duration between alert and restored 'for' state. // This is maintained only for alerts with configured 'for' time greater than grace period. ForGracePeriod string `json:"forGracePeriod,omitempty"` // Minimum amount of time to wait before resending an alert to Alertmanager. ResendDelay string `json:"resendDelay,omitempty"` } // DeepCopyObject implements the runtime.Object interface. func (l *Alertmanager) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. 
func (l *AlertmanagerList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *Prometheus) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PrometheusList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ServiceMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PodMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (f *PrometheusRule) DeepCopyObject() runtime.Object { return f.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { return l.DeepCopy() }
1
13,660
I don't see us ignoring the field. Are we sure Prometheus wouldn't refuse to load the rules?
prometheus-operator-prometheus-operator
go
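The SecretOrConfigMap doc comment in the Go file above states that the Secret and ConfigMap fields are mutually exclusive, with Validate() enforcing that rule. The following is a minimal, self-contained sketch of that check; it is illustrative only and not part of the dataset row, and the selector types are simplified stand-ins for the k8s.io/api/core/v1 ones rather than the operator's real imports.

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for v1.SecretKeySelector and v1.ConfigMapKeySelector.
type SecretKeySelector struct{ Name, Key string }
type ConfigMapKeySelector struct{ Name, Key string }

// SecretOrConfigMap mirrors the struct above: at most one field may be set.
type SecretOrConfigMap struct {
	Secret    *SecretKeySelector
	ConfigMap *ConfigMapKeySelector
}

// Validate returns an error when both fields are set. The comparison is on the
// pointer values themselves (c.Secret != nil), not on their addresses, which
// would always be non-nil.
func (c *SecretOrConfigMap) Validate() error {
	if c.Secret != nil && c.ConfigMap != nil {
		return errors.New("SecretOrConfigMap can not specify both Secret and ConfigMap")
	}
	return nil
}

func main() {
	ok := SecretOrConfigMap{Secret: &SecretKeySelector{Name: "ca", Key: "ca.crt"}}
	bad := SecretOrConfigMap{
		Secret:    &SecretKeySelector{Name: "ca", Key: "ca.crt"},
		ConfigMap: &ConfigMapKeySelector{Name: "ca", Key: "ca.crt"},
	}
	fmt.Println(ok.Validate())  // <nil>
	fmt.Println(bad.Validate()) // SecretOrConfigMap can not specify both Secret and ConfigMap
}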
@@ -314,7 +314,7 @@ public class WindowWidget extends UIWidget implements SessionChangeListener, super.onResume(); if (isVisible() || mIsInVRVideoMode) { mSession.setActive(true); - if (!SettingsStore.getInstance(getContext()).getLayersEnabled()) { + if (!SettingsStore.getInstance(getContext()).getLayersEnabled() && !mSession.hasDisplay()) { // Ensure the Gecko Display is correctly recreated. // See: https://github.com/MozillaReality/FirefoxReality/issues/2880 callSurfaceChanged();
1
/* -*- Mode: Java; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: nil; -*- * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ package org.mozilla.vrbrowser.ui.widgets; import android.content.Context; import android.content.res.Configuration; import android.graphics.Canvas; import android.graphics.Matrix; import android.graphics.PointF; import android.graphics.Rect; import android.graphics.RectF; import android.graphics.SurfaceTexture; import android.net.Uri; import android.util.Log; import android.util.Pair; import android.view.KeyEvent; import android.view.MotionEvent; import android.view.Surface; import android.view.View; import android.view.inputmethod.EditorInfo; import android.view.inputmethod.InputConnection; import androidx.annotation.IntDef; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.StringRes; import androidx.annotation.UiThread; import androidx.lifecycle.ViewModelProvider; import org.mozilla.geckoview.AllowOrDeny; import org.mozilla.geckoview.GeckoResult; import org.mozilla.geckoview.GeckoSession; import org.mozilla.geckoview.MediaElement; import org.mozilla.geckoview.PanZoomController; import org.mozilla.vrbrowser.R; import org.mozilla.vrbrowser.VRBrowserActivity; import org.mozilla.vrbrowser.VRBrowserApplication; import org.mozilla.vrbrowser.browser.BookmarksStore; import org.mozilla.vrbrowser.browser.Media; import org.mozilla.vrbrowser.browser.PromptDelegate; import org.mozilla.vrbrowser.browser.SessionChangeListener; import org.mozilla.vrbrowser.browser.SettingsStore; import org.mozilla.vrbrowser.browser.VideoAvailabilityListener; import org.mozilla.vrbrowser.browser.engine.Session; import org.mozilla.vrbrowser.browser.engine.SessionStore; import org.mozilla.vrbrowser.telemetry.GleanMetricsService; import org.mozilla.vrbrowser.telemetry.TelemetryWrapper; import org.mozilla.vrbrowser.ui.adapters.Bookmark; import org.mozilla.vrbrowser.ui.callbacks.BookmarksCallback; import org.mozilla.vrbrowser.ui.callbacks.HistoryCallback; import org.mozilla.vrbrowser.ui.callbacks.LibraryItemContextMenuClickCallback; import org.mozilla.vrbrowser.ui.viewmodel.WindowViewModel; import org.mozilla.vrbrowser.ui.views.BookmarksView; import org.mozilla.vrbrowser.ui.views.HistoryView; import org.mozilla.vrbrowser.ui.widgets.dialogs.ClearHistoryDialogWidget; import org.mozilla.vrbrowser.ui.widgets.dialogs.PromptDialogWidget; import org.mozilla.vrbrowser.ui.widgets.dialogs.SelectionActionWidget; import org.mozilla.vrbrowser.ui.widgets.menus.ContextMenuWidget; import org.mozilla.vrbrowser.ui.widgets.menus.LibraryMenuWidget; import org.mozilla.vrbrowser.utils.StringUtils; import org.mozilla.vrbrowser.utils.UrlUtils; import org.mozilla.vrbrowser.utils.ViewUtils; import java.util.Arrays; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.stream.Collectors; import java.util.stream.Stream; import mozilla.components.concept.storage.PageObservation; import mozilla.components.concept.storage.PageVisit; import mozilla.components.concept.storage.RedirectSource; import mozilla.components.concept.storage.VisitInfo; import mozilla.components.concept.storage.VisitType; import static org.mozilla.vrbrowser.ui.widgets.settings.SettingsView.SettingViewType.FXA; import static org.mozilla.vrbrowser.utils.ServoUtils.isInstanceOfServoSession; 
public class WindowWidget extends UIWidget implements SessionChangeListener, GeckoSession.ContentDelegate, GeckoSession.NavigationDelegate, VideoAvailabilityListener, GeckoSession.HistoryDelegate, GeckoSession.ProgressDelegate, GeckoSession.SelectionActionDelegate { @IntDef(value = { SESSION_RELEASE_DISPLAY, SESSION_DO_NOT_RELEASE_DISPLAY}) public @interface OldSessionDisplayAction {} public static final int SESSION_RELEASE_DISPLAY = 0; public static final int SESSION_DO_NOT_RELEASE_DISPLAY = 1; private Surface mSurface; private int mWidth; private int mHeight; private int mHandle; private WidgetPlacement mWidgetPlacement; private TopBarWidget mTopBar; private TitleBarWidget mTitleBar; private WidgetManagerDelegate mWidgetManager; private PromptDialogWidget mAlertDialog; private PromptDialogWidget mConfirmDialog; private PromptDialogWidget mAppDialog; private ClearHistoryDialogWidget mClearHistoryDialog; private ContextMenuWidget mContextMenu; private SelectionActionWidget mSelectionMenu; private LibraryMenuWidget mLibraryItemContextMenu; private int mWidthBackup; private int mHeightBackup; private int mBorderWidth; private Runnable mFirstDrawCallback; private boolean mIsInVRVideoMode; private View mView; private Session mSession; private int mWindowId; private BookmarksView mBookmarksView; private HistoryView mHistoryView; private Windows.WindowPlacement mWindowPlacement = Windows.WindowPlacement.FRONT; private Windows.WindowPlacement mWindowPlacementBeforeFullscreen = Windows.WindowPlacement.FRONT; private float mMaxWindowScale = 3; private boolean mIsRestored = false; private CopyOnWriteArrayList<WindowListener> mListeners; boolean mActive = false; boolean mHovered = false; boolean mClickedAfterFocus = false; boolean mIsBookmarksVisible = false; boolean mIsHistoryVisible = false; private WidgetPlacement mPlacementBeforeFullscreen; private WidgetPlacement mPlacementBeforeResize; private boolean mIsResizing; private boolean mIsFullScreen; private boolean mAfterFirstPaint; private boolean mCaptureOnPageStop; private PromptDelegate mPromptDelegate; private Executor mUIThreadExecutor; private WindowViewModel mViewModel; private CopyOnWriteArrayList<Runnable> mSetViewQueuedCalls; public interface WindowListener { default void onFocusRequest(@NonNull WindowWidget aWindow) {} default void onBorderChanged(@NonNull WindowWidget aWindow) {} default void onSessionChanged(@NonNull Session aOldSession, @NonNull Session aSession) {} default void onFullScreen(@NonNull WindowWidget aWindow, boolean aFullScreen) {} default void onVideoAvailabilityChanged(@NonNull WindowWidget aWindow) {} } public WindowWidget(Context aContext, int windowId, boolean privateMode) { super(aContext); mWindowId = windowId; mSession = SessionStore.get().createSession(privateMode); initialize(aContext); } public WindowWidget(Context aContext, int windowId, Session aSession) { super(aContext); mWindowId = windowId; mSession = aSession; initialize(aContext); } private void initialize(Context aContext) { mSetViewQueuedCalls = new CopyOnWriteArrayList<>(); mWidgetManager = (WidgetManagerDelegate) aContext; mBorderWidth = SettingsStore.getInstance(aContext).getTransparentBorderWidth(); // ModelView creation and observers setup mViewModel = new ViewModelProvider( (VRBrowserActivity)getContext(), ViewModelProvider.AndroidViewModelFactory.getInstance(((VRBrowserActivity) getContext()).getApplication())) .get(String.valueOf(hashCode()), WindowViewModel.class); mViewModel.setIsPrivateSession(mSession.isPrivateMode()); 
mViewModel.setUrl(mSession.getCurrentUri()); mUIThreadExecutor = ((VRBrowserApplication)getContext().getApplicationContext()).getExecutors().mainThread(); mListeners = new CopyOnWriteArrayList<>(); setupListeners(mSession); mBookmarksView = new BookmarksView(aContext); mBookmarksView.addBookmarksListener(mBookmarksViewListener); mHistoryView = new HistoryView(aContext); mHistoryView.addHistoryListener(mHistoryListener); SessionStore.get().getBookmarkStore().addListener(mBookmarksListener); mHandle = ((WidgetManagerDelegate)aContext).newWidgetHandle(); mWidgetPlacement = new WidgetPlacement(aContext); mPlacementBeforeFullscreen = new WidgetPlacement(aContext); mPlacementBeforeResize = new WidgetPlacement(aContext); mIsResizing = false; mIsFullScreen = false; initializeWidgetPlacement(mWidgetPlacement); if (mSession.isPrivateMode()) { mWidgetPlacement.clearColor = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_private_clear_color)); } else { mWidgetPlacement.clearColor = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_blank_clear_color)); } mTopBar = new TopBarWidget(aContext); mTopBar.attachToWindow(this); mTitleBar = new TitleBarWidget(aContext); mTitleBar.attachToWindow(this); mPromptDelegate = new PromptDelegate(getContext()); mPromptDelegate.attachToWindow(this); setFocusable(true); TelemetryWrapper.openWindowEvent(mWindowId); GleanMetricsService.openWindowEvent(mWindowId); if (mSession.getGeckoSession() != null) { onCurrentSessionChange(null, mSession.getGeckoSession()); } } @Override protected void initializeWidgetPlacement(WidgetPlacement aPlacement) { int windowWidth = SettingsStore.getInstance(getContext()).getWindowWidth(); aPlacement.width = windowWidth + mBorderWidth * 2; aPlacement.height = SettingsStore.getInstance(getContext()).getWindowHeight() + mBorderWidth * 2; aPlacement.worldWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width) * (float)windowWidth / (float)SettingsStore.WINDOW_WIDTH_DEFAULT; aPlacement.density = 1.0f; aPlacement.visible = true; aPlacement.cylinder = true; aPlacement.textureScale = 1.0f; aPlacement.name = "Window"; // Check Windows.placeWindow method for remaining placement set-up } public void setPopUpDelegate(@Nullable PromptDelegate.PopUpDelegate delegate) { mPromptDelegate.setPopupDelegate(delegate); } void setupListeners(Session aSession) { aSession.addSessionChangeListener(this); aSession.addContentListener(this); aSession.addVideoAvailabilityListener(this); aSession.addNavigationListener(this); aSession.addProgressListener(this); aSession.setHistoryDelegate(this); aSession.addSelectionActionListener(this); } void cleanListeners(Session aSession) { aSession.removeSessionChangeListener(this); aSession.removeContentListener(this); aSession.removeVideoAvailabilityListener(this); aSession.removeNavigationListener(this); aSession.removeProgressListener(this); aSession.setHistoryDelegate(null); aSession.removeSelectionActionListener(this); } @Override public void show(@ShowFlags int aShowFlags) { if (!mWidgetPlacement.visible) { mWidgetPlacement.visible = true; } mWidgetManager.updateWidget(this); setFocusableInTouchMode(false); if (aShowFlags == REQUEST_FOCUS) { requestFocusFromTouch(); } else { clearFocus(); } mSession.setActive(true); } @Override public void hide(@HideFlags int aHideFlag) { if (mWidgetPlacement.visible) { mWidgetPlacement.visible = false; } mWidgetManager.updateWidget(this); clearFocus(); mSession.setActive(false); } @Override protected void onDismiss() { if (isBookmarksVisible()) { 
hideBookmarks(); } else if (isHistoryVisible()) { hideHistory(); } else { if (mSession.canGoBack()) { mSession.goBack(); } } } @Override public void onPause() { super.onPause(); mSession.setActive(false); } @Override public void onResume() { super.onResume(); if (isVisible() || mIsInVRVideoMode) { mSession.setActive(true); if (!SettingsStore.getInstance(getContext()).getLayersEnabled()) { // Ensure the Gecko Display is correctly recreated. // See: https://github.com/MozillaReality/FirefoxReality/issues/2880 callSurfaceChanged(); } } } @Override public void onConfigurationChanged(Configuration newConfig) { super.onConfigurationChanged(newConfig); mHistoryView.updateUI(); mBookmarksView.updateUI(); mViewModel.refresh(); } public void close() { TelemetryWrapper.closeWindowEvent(mWindowId); GleanMetricsService.closeWindowEvent(mWindowId); hideContextMenus(); releaseWidget(); mBookmarksView.onDestroy(); mHistoryView.onDestroy(); mViewModel.setIsTopBarVisible(false); mViewModel.setIsTitleBarVisible(false); SessionStore.get().destroySession(mSession); if (mTopBar != null) { mWidgetManager.removeWidget(mTopBar); mTopBar.setDelegate((TopBarWidget.Delegate) null); } if (mTitleBar != null) { mWidgetManager.removeWidget(mTitleBar); mTitleBar.setDelegate((TitleBarWidget.Delegate) null); } mListeners.clear(); } public void loadHomeIfNotRestored() { if (!mIsRestored) { loadHome(); } } public void loadHome() { if (mSession.isPrivateMode()) { mSession.loadPrivateBrowsingPage(); } else { mSession.loadUri(SettingsStore.getInstance(getContext()).getHomepage()); } } protected void setRestored(boolean restored) { mIsRestored = restored; } private void setView(View view, boolean switchSurface) { Runnable setView = () -> { if (switchSurface) { pauseCompositor(); } mView = view; removeView(view); mView.setVisibility(VISIBLE); addView(mView); if (switchSurface) { mWidgetPlacement.density = getContext().getResources().getDisplayMetrics().density; if (mTexture != null && mSurface != null && mRenderer == null) { // Create the UI Renderer for the current surface. // Surface must be released when switching back to WebView surface or the browser // will not render it correctly. See release code in unsetView(). mRenderer = new UISurfaceTextureRenderer(mSurface, mWidgetPlacement.textureWidth(), mWidgetPlacement.textureHeight()); } mWidgetManager.updateWidget(WindowWidget.this); mWidgetManager.pushWorldBrightness(WindowWidget.this, WidgetManagerDelegate.DEFAULT_DIM_BRIGHTNESS); mWidgetManager.pushBackHandler(mBackHandler); setWillNotDraw(false); postInvalidate(); } }; if (mAfterFirstPaint) { setView.run(); } else { mSetViewQueuedCalls.add(setView); } } private void unsetView(View view, boolean switchSurface) { mSetViewQueuedCalls.clear(); if (mView != null && mView == view) { mView = null; removeView(view); view.setVisibility(GONE); if (switchSurface) { setWillNotDraw(true); if (mTexture != null) { // Surface must be recreated here when not using layers. // When using layers the new Surface is received via the setSurface() method. 
if (mRenderer != null) { mRenderer.release(); mRenderer = null; } mSurface = new Surface(mTexture); } mWidgetPlacement.density = 1.0f; mWidgetManager.updateWidget(WindowWidget.this); mWidgetManager.popWorldBrightness(WindowWidget.this); mWidgetManager.popBackHandler(mBackHandler); } } } public boolean isBookmarksVisible() { return (mView != null && mView == mBookmarksView); } public boolean isHistoryVisible() { return (mView != null && mView == mHistoryView); } public int getWindowWidth() { return mWidgetPlacement.width; } public int getWindowHeight() { return mWidgetPlacement.height; } public void switchBookmarks() { if (isHistoryVisible()) { hideHistory(false); showBookmarks(false); } else if (isBookmarksVisible()) { hideBookmarks(); } else { showBookmarks(); } } private void showBookmarks() { showBookmarks(true); } private void showBookmarks(boolean switchSurface) { if (mView == null) { setView(mBookmarksView, switchSurface); mBookmarksView.onShow(); mViewModel.setIsBookmarksVisible(true); mIsBookmarksVisible = true; } } public void hideBookmarks() { hideBookmarks(true); } private void hideBookmarks(boolean switchSurface) { if (mView != null) { unsetView(mBookmarksView, switchSurface); mViewModel.setIsBookmarksVisible(false); mIsBookmarksVisible = false; } } public void switchHistory() { if (isBookmarksVisible()) { hideBookmarks(false); showHistory(false); } else if (isHistoryVisible()) { hideHistory(); } else { showHistory(); } } private void hideLibraryPanels() { if (isBookmarksVisible()) { hideBookmarks(); } else if (isHistoryVisible()) { hideHistory(); } } private void showHistory() { showHistory(true); } private void showHistory(boolean switchSurface) { if (mView == null) { setView(mHistoryView, switchSurface); mHistoryView.onShow(); mViewModel.setIsHistoryVisible(true); mIsHistoryVisible = true; } } public void hideHistory() { hideHistory(true); } public void hideHistory(boolean switchSurface) { if (mView != null) { unsetView(mHistoryView, switchSurface); mViewModel.setIsHistoryVisible(false); mIsHistoryVisible = false; } } public void pauseCompositor() { if (mSession == null) { return; } mSession.surfaceDestroyed(); } public void resumeCompositor() { if (mSession == null) { return; } if (mSurface == null) { return; } callSurfaceChanged(); } public void enableVRVideoMode(int aVideoWidth, int aVideoHeight, boolean aResetBorder) { if (!mIsInVRVideoMode) { mWidthBackup = mWidth; mHeightBackup = mHeight; mIsInVRVideoMode = true; } boolean borderChanged = aResetBorder && mBorderWidth > 0; if (aVideoWidth == mWidth && aVideoHeight == mHeight && !borderChanged) { return; } if (aResetBorder) { mBorderWidth = 0; } mWidgetPlacement.width = aVideoWidth + mBorderWidth * 2; mWidgetPlacement.height = aVideoHeight + mBorderWidth * 2; mWidgetManager.updateWidget(this); } public void disableVRVideoMode() { if (!mIsInVRVideoMode || mWidthBackup == 0 || mHeightBackup == 0) { return; } mIsInVRVideoMode = false; int border = SettingsStore.getInstance(getContext()).getTransparentBorderWidth(); if (mWidthBackup == mWidth && mHeightBackup == mHeight && border == mBorderWidth) { return; } mBorderWidth = border; mWidgetPlacement.width = mWidthBackup; mWidgetPlacement.height = mHeightBackup; mWidgetManager.updateWidget(this); } public void setWindowPlacement(@NonNull Windows.WindowPlacement aPlacement) { if (mActive) { TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), false); } mWindowPlacement = aPlacement; mViewModel.setPlacement(mWindowPlacement); if (mActive) { 
TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), true); } } public void setIsOnlyWindow(boolean isOnlyWindow) { mViewModel.setIsOnlyWindow(isOnlyWindow); } public @NonNull Windows.WindowPlacement getWindowPlacementBeforeFullscreen() { return mWindowPlacementBeforeFullscreen; } public @NonNull Windows.WindowPlacement getWindowPlacement() { return mWindowPlacement; } @Override public void resizeByMultiplier(float aspect, float multiplier) { Pair<Float, Float> targetSize = getSizeForScale(multiplier, aspect); handleResizeEvent(targetSize.first, targetSize.second); } public float getCurrentScale() { float currentAspect = getCurrentAspect(); float currentWorldHeight = mWidgetPlacement.worldWidth / currentAspect; float currentArea = mWidgetPlacement.worldWidth * currentWorldHeight; float defaultWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width); float defaultHeight = defaultWidth / SettingsStore.getInstance(getContext()).getWindowAspect(); float defaultArea = defaultWidth * defaultHeight; return currentArea / defaultArea; } public float getCurrentAspect() { return (float) mWidgetPlacement.width / (float) mWidgetPlacement.height; } public int getBorderWidth() { return mBorderWidth; } public void setActiveWindow(boolean active) { mActive = active; if (active) { SessionStore.get().setActiveSession(mSession); GeckoSession session = mSession.getGeckoSession(); if (session != null) { session.getTextInput().setView(this); } mSession.updateLastUse(); mWidgetManager.getNavigationBar().addNavigationBarListener(mNavigationBarListener); } else { mWidgetManager.getNavigationBar().removeNavigationBarListener(mNavigationBarListener); updateBookmarked(); } hideContextMenus(); TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), mActive); updateBorder(); mViewModel.setIsActiveWindow(active); // Remove tha back handler in case there is a library view visible, otherwise it gets dismissed // when back is clicked even if other window is focused. 
if (mView != null) { if (active) { mWidgetManager.pushBackHandler(mBackHandler); } else { mWidgetManager.popBackHandler(mBackHandler); } } } @Nullable public Session getSession() { return mSession; } public TopBarWidget getTopBar() { return mTopBar; } public void setTopBar(TopBarWidget aWidget) { if (mTopBar != aWidget) { mTopBar = aWidget; mTopBar.attachToWindow(this); } } public void setResizeMode(boolean resizing) { mViewModel.setIsResizeMode(resizing); } public TitleBarWidget getTitleBar() { return mTitleBar; } @Override public void setSurfaceTexture(SurfaceTexture aTexture, final int aWidth, final int aHeight, Runnable aFirstDrawCallback) { mFirstDrawCallback = aFirstDrawCallback; if (mView != null) { super.setSurfaceTexture(aTexture, aWidth, aHeight, aFirstDrawCallback); } else { GeckoSession session = mSession.getGeckoSession(); if (session == null) { return; } if (aTexture == null) { setWillNotDraw(true); return; } mWidth = aWidth; mHeight = aHeight; mTexture = aTexture; aTexture.setDefaultBufferSize(aWidth, aHeight); mSurface = new Surface(aTexture); callSurfaceChanged(); } } @Override public void setSurface(Surface aSurface, final int aWidth, final int aHeight, Runnable aFirstDrawCallback) { if (mView != null) { super.setSurface(aSurface, aWidth, aHeight, aFirstDrawCallback); } else { mWidth = aWidth; mHeight = aHeight; mSurface = aSurface; mFirstDrawCallback = aFirstDrawCallback; if (mSurface != null) { callSurfaceChanged(); } else { mSession.surfaceDestroyed(); } } } private void callSurfaceChanged() { if (mSession != null && mSurface != null) { mSession.surfaceChanged(mSurface, mBorderWidth, mBorderWidth, mWidth - mBorderWidth * 2, mHeight - mBorderWidth * 2); mSession.updateLastUse(); } } @Override public void resizeSurface(final int aWidth, final int aHeight) { if (mView != null) { super.resizeSurface(aWidth, aHeight); } mWidth = aWidth; mHeight = aHeight; if (mTexture != null) { mTexture.setDefaultBufferSize(aWidth, aHeight); } if (mSurface != null && mView == null) { callSurfaceChanged(); } } @Override public int getHandle() { return mHandle; } @Override public WidgetPlacement getPlacement() { return mWidgetPlacement; } @Override public void handleTouchEvent(MotionEvent aEvent) { if (aEvent.getAction() == MotionEvent.ACTION_DOWN) { if (!mActive) { mClickedAfterFocus = true; updateBorder(); // Focus this window for (WindowListener listener: mListeners) { listener.onFocusRequest(this); } // Return to discard first click after focus return; } } else if (aEvent.getAction() == MotionEvent.ACTION_UP || aEvent.getAction() == MotionEvent.ACTION_CANCEL) { mClickedAfterFocus = false; updateBorder(); } if (!mActive) { // Do not send touch events to not focused windows. return; } if (mView != null) { super.handleTouchEvent(aEvent); } else { if (aEvent.getActionMasked() == MotionEvent.ACTION_DOWN) { requestFocus(); requestFocusFromTouch(); } GeckoSession session = mSession.getGeckoSession(); if (session != null) { session.getPanZoomController().onTouchEvent(aEvent); } } } @Override public void handleHoverEvent(MotionEvent aEvent) { if (aEvent.getAction() == MotionEvent.ACTION_HOVER_ENTER) { mHovered = true; updateBorder(); } else if (aEvent.getAction() == MotionEvent.ACTION_HOVER_EXIT) { mHovered = false; updateBorder(); } if (!mActive) { // Do not send touch events to not focused windows. 
return; } if (mView != null) { super.handleHoverEvent(aEvent); } else { GeckoSession session = mSession.getGeckoSession(); if (session != null) { session.getPanZoomController().onMotionEvent(aEvent); } } } protected void updateBorder() { int color = 0; if (!mActive && !mClickedAfterFocus && mHovered) { color = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_border_hover)); } else if (mClickedAfterFocus) { color = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_border_click)); } if (mWidgetPlacement.borderColor != color) { mWidgetPlacement.borderColor = color; mWidgetManager.updateWidget(this); for (WindowListener listener: mListeners) { listener.onBorderChanged(this); } } } public void saveBeforeFullscreenPlacement() { mWindowPlacementBeforeFullscreen = mWindowPlacement; mPlacementBeforeFullscreen.copyFrom(mWidgetPlacement); } public void restoreBeforeFullscreenPlacement() { mWindowPlacement = mWindowPlacementBeforeFullscreen; mWidgetPlacement.copyFrom(mPlacementBeforeFullscreen); } public WidgetPlacement getBeforeFullscreenPlacement() { return mPlacementBeforeFullscreen; } public void saveBeforeResizePlacement() { mPlacementBeforeResize.copyFrom(mWidgetPlacement); } public void restoreBeforeResizePlacement() { mWidgetPlacement.copyFrom(mPlacementBeforeResize); } public WidgetPlacement getBeforeResizePlacement() { return mPlacementBeforeResize; } public void setIsResizing(boolean isResizing) { mIsResizing = isResizing; } public boolean isResizing() { return mIsResizing; } public void setIsFullScreen(boolean isFullScreen) { if (isFullScreen != mIsFullScreen) { mIsFullScreen = isFullScreen; mViewModel.setIsFullscreen(isFullScreen); for (WindowListener listener: mListeners) { listener.onFullScreen(this, isFullScreen); } } } public boolean isFullScreen() { return mIsFullScreen; } public void addWindowListener(WindowListener aListener) { if (!mListeners.contains(aListener)) { mListeners.add(aListener); } } public void removeWindowListener(WindowListener aListener) { mListeners.remove(aListener); } public void waitForFirstPaint() { setFirstPaintReady(false); setFirstDrawCallback(() -> { if (!isFirstPaintReady()) { setFirstPaintReady(true); mWidgetManager.updateWidget(WindowWidget.this); } }); mWidgetManager.updateWidget(this); } @Override public void handleResizeEvent(float aWorldWidth, float aWorldHeight) { int width = getWindowWidth(aWorldWidth); float aspect = aWorldWidth / aWorldHeight; int height = (int) Math.floor((float)width / aspect); mWidgetPlacement.width = width + mBorderWidth * 2; mWidgetPlacement.height = height + mBorderWidth * 2; mWidgetPlacement.worldWidth = aWorldWidth; mWidgetManager.updateWidget(this); mWidgetManager.updateVisibleWidgets(); } @Override public void releaseWidget() { cleanListeners(mSession); GeckoSession session = mSession.getGeckoSession(); mSetViewQueuedCalls.clear(); if (mSession != null) { mSession.releaseDisplay(); } if (session != null) { session.getTextInput().setView(null); } if (mSurface != null) { mSurface.release(); mSurface = null; } if (mTexture != null && mRenderer == null) { // Custom SurfaceTexture used for GeckoView mTexture.release(); mTexture = null; } mBookmarksView.removeBookmarksListener(mBookmarksViewListener); mHistoryView.removeHistoryListener(mHistoryListener); mWidgetManager.getNavigationBar().removeNavigationBarListener(mNavigationBarListener); SessionStore.get().getBookmarkStore().removeListener(mBookmarksListener); mPromptDelegate.detachFromWindow(); super.releaseWidget(); } @Override public void 
setFirstPaintReady(final boolean aFirstPaintReady) { mWidgetPlacement.composited = aFirstPaintReady; if (!aFirstPaintReady) { mAfterFirstPaint = false; } } public void setFirstDrawCallback(Runnable aRunnable) { mFirstDrawCallback = aRunnable; } @Override public boolean isFirstPaintReady() { return mWidgetPlacement != null && mWidgetPlacement.composited; } @Override public boolean isVisible() { return mWidgetPlacement.visible; } @Override public boolean isLayer() { return mSurface != null && mTexture == null; } @Override public void setVisible(boolean aVisible) { if (mWidgetPlacement.visible == aVisible) { return; } if (!mIsInVRVideoMode) { mSession.setActive(aVisible); if (aVisible) { callSurfaceChanged(); } } mWidgetPlacement.visible = aVisible; if (!aVisible) { if (mIsBookmarksVisible || mIsHistoryVisible) { mWidgetManager.popWorldBrightness(this); } } else { if (mIsBookmarksVisible || mIsHistoryVisible) { mWidgetManager.pushWorldBrightness(this, WidgetManagerDelegate.DEFAULT_DIM_BRIGHTNESS); } } mIsBookmarksVisible = isBookmarksVisible(); mIsHistoryVisible = isHistoryVisible(); mWidgetManager.updateWidget(this); if (!aVisible) { clearFocus(); } mViewModel.setIsWindowVisible(aVisible); } @Override public void draw(Canvas aCanvas) { if (mView != null) { super.draw(aCanvas); } } public void setSession(@NonNull Session aSession) { setSession(aSession, SESSION_RELEASE_DISPLAY); } public void setSession(@NonNull Session aSession, @OldSessionDisplayAction int aDisplayAction) { if (mSession != aSession) { Session oldSession = mSession; if (oldSession != null) { cleanListeners(oldSession); if (aDisplayAction == SESSION_RELEASE_DISPLAY) { oldSession.releaseDisplay(); } } mSession = aSession; mViewModel.setIsPrivateSession(mSession.isPrivateMode()); if (oldSession != null) { onCurrentSessionChange(oldSession.getGeckoSession(), aSession.getGeckoSession()); } else { onCurrentSessionChange(null, aSession.getGeckoSession()); } setupListeners(mSession); for (WindowListener listener: mListeners) { listener.onSessionChanged(oldSession, aSession); } } mCaptureOnPageStop = false; hideLibraryPanels(); } public void showPopUps() { if (mPromptDelegate != null) { mPromptDelegate.showPopUps(getSession().getGeckoSession()); } } public boolean hasPendingPopUps() { if (mPromptDelegate != null) { return mPromptDelegate.hasPendingPopUps(getSession().getGeckoSession()); } return false; } // Session.GeckoSessionChange @Override public void onCurrentSessionChange(GeckoSession aOldSession, GeckoSession aSession) { Log.d(LOGTAG, "onCurrentSessionChange: " + this.hashCode()); mWidgetManager.setIsServoSession(isInstanceOfServoSession(aSession)); Log.d(LOGTAG, "surfaceChanged: " + aSession.hashCode()); callSurfaceChanged(); aSession.getTextInput().setView(this); mViewModel.setIsPrivateSession(aSession.getSettings().getUsePrivateMode()); waitForFirstPaint(); } @Override public void onStackSession(Session aSession) { // e.g. 
tab opened via window.open() aSession.updateLastUse(); Session current = mSession; setSession(aSession); SessionStore.get().setActiveSession(aSession); current.captureBackgroundBitmap(getWindowWidth(), getWindowHeight()).thenAccept(aVoid -> current.setActive(false)); mWidgetManager.getWindows().showTabAddedNotification(); GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.BROWSER); } @Override public void onUnstackSession(Session aSession, Session aParent) { if (mSession == aSession) { aParent.setActive(true); setSession(aParent); SessionStore.get().setActiveSession(aParent); SessionStore.get().destroySession(aSession); } } // View @Override public InputConnection onCreateInputConnection(final EditorInfo outAttrs) { Log.d(LOGTAG, "BrowserWidget onCreateInputConnection"); GeckoSession session = mSession.getGeckoSession(); if (session == null || mView != null) { return null; } return session.getTextInput().onCreateInputConnection(outAttrs); } @Override public boolean onCheckIsTextEditor() { return !mIsResizing && mSession.isInputActive(); } @Override public boolean onKeyPreIme(int aKeyCode, KeyEvent aEvent) { if (super.onKeyPreIme(aKeyCode, aEvent)) { return true; } GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getTextInput().onKeyPreIme(aKeyCode, aEvent); } @Override public boolean onKeyUp(int aKeyCode, KeyEvent aEvent) { if (super.onKeyUp(aKeyCode, aEvent)) { return true; } GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getTextInput().onKeyUp(aKeyCode, aEvent); } @Override public boolean onKeyDown(int aKeyCode, KeyEvent aEvent) { if (super.onKeyDown(aKeyCode, aEvent)) { return true; } GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getTextInput().onKeyDown(aKeyCode, aEvent); } @Override public boolean onKeyLongPress(int aKeyCode, KeyEvent aEvent) { if (super.onKeyLongPress(aKeyCode, aEvent)) { return true; } GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getTextInput().onKeyLongPress(aKeyCode, aEvent); } @Override public boolean onKeyMultiple(int aKeyCode, int repeatCount, KeyEvent aEvent) { if (super.onKeyMultiple(aKeyCode, repeatCount, aEvent)) { return true; } GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getTextInput().onKeyMultiple(aKeyCode, repeatCount, aEvent); } @Override protected void onFocusChanged(boolean aGainFocus, int aDirection, Rect aPreviouslyFocusedRect) { super.onFocusChanged(aGainFocus, aDirection, aPreviouslyFocusedRect); Log.d(LOGTAG, "BrowserWidget onFocusChanged: " + (aGainFocus ? 
"true" : "false")); } @Override public boolean onTouchEvent(MotionEvent aEvent) { GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getPanZoomController().onTouchEvent(aEvent) == PanZoomController.INPUT_RESULT_HANDLED; } @Override public boolean onGenericMotionEvent(MotionEvent aEvent) { if (mView != null) { return super.onGenericMotionEvent(aEvent); } else { GeckoSession session = mSession.getGeckoSession(); return (session != null) && session.getPanZoomController().onMotionEvent(aEvent) == PanZoomController.INPUT_RESULT_HANDLED; } } public void showAlert(String title, @NonNull String msg, @Nullable PromptDialogWidget.Delegate callback) { if (mAlertDialog == null) { mAlertDialog = new PromptDialogWidget(getContext()); mAlertDialog.setButtons(new int[] { R.string.ok_button }); mAlertDialog.setCheckboxVisible(false); mAlertDialog.setDescriptionVisible(false); } mAlertDialog.setTitle(title); mAlertDialog.setBody(msg); mAlertDialog.setButtonsDelegate(index -> { mAlertDialog.hide(REMOVE_WIDGET); if (callback != null) { callback.onButtonClicked(index); } mAlertDialog.releaseWidget(); mAlertDialog = null; }); mAlertDialog.show(REQUEST_FOCUS); } public void showConfirmPrompt(String title, @NonNull String msg, @NonNull String[] btnMsg, @Nullable PromptDialogWidget.Delegate callback) { if (mConfirmDialog == null) { mConfirmDialog = new PromptDialogWidget(getContext()); mConfirmDialog.setButtons(new int[] { R.string.cancel_button, R.string.ok_button }); mConfirmDialog.setCheckboxVisible(false); mConfirmDialog.setDescriptionVisible(false); } mConfirmDialog.setTitle(title); mConfirmDialog.setBody(msg); mConfirmDialog.setButtons(btnMsg); mConfirmDialog.setButtonsDelegate(index -> { mConfirmDialog.hide(REMOVE_WIDGET); if (callback != null) { callback.onButtonClicked(index); } mConfirmDialog.releaseWidget(); mConfirmDialog = null; }); mConfirmDialog.show(REQUEST_FOCUS); } public void showDialog(@NonNull String title, @NonNull @StringRes int description, @NonNull @StringRes int [] btnMsg, @Nullable PromptDialogWidget.Delegate buttonsCallback, @Nullable Runnable linkCallback) { mAppDialog = new PromptDialogWidget(getContext()); mAppDialog.setIconVisible(false); mAppDialog.setCheckboxVisible(false); mAppDialog.setDescriptionVisible(false); mAppDialog.setTitle(title); mAppDialog.setBody(description); mAppDialog.setButtons(btnMsg); mAppDialog.setButtonsDelegate(index -> { mAppDialog.hide(REMOVE_WIDGET); if (buttonsCallback != null) { buttonsCallback.onButtonClicked(index); } mAppDialog.releaseWidget(); }); mAppDialog.setLinkDelegate(() -> { mAppDialog.hide(REMOVE_WIDGET); if (linkCallback != null) { linkCallback.run(); } mAppDialog.releaseWidget(); mAppDialog = null; }); mAppDialog.show(REQUEST_FOCUS); } public void showClearCacheDialog() { if (mClearHistoryDialog == null) { mClearHistoryDialog = new ClearHistoryDialogWidget(getContext()); } mClearHistoryDialog.show(REQUEST_FOCUS); } public void setMaxWindowScale(float aScale) { if (mMaxWindowScale != aScale) { mMaxWindowScale = aScale; Pair<Float, Float> maxSize = getSizeForScale(aScale); if (mWidgetPlacement.worldWidth > maxSize.first) { float currentAspect = (float) mWidgetPlacement.width / (float) mWidgetPlacement.height; mWidgetPlacement.worldWidth = maxSize.first; mWidgetPlacement.width = getWindowWidth(maxSize.first); mWidgetPlacement.height = (int) Math.ceil((float)mWidgetPlacement.width / currentAspect); } } } public float getMaxWindowScale() { return mMaxWindowScale; } public @NonNull Pair<Float, Float> 
getSizeForScale(float aScale) { return getSizeForScale(aScale, SettingsStore.getInstance(getContext()).getWindowAspect()); } public @NonNull Pair<Float, Float> getSizeForScale(float aScale, float aAspect) { float worldWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width) * (float)SettingsStore.getInstance(getContext()).getWindowWidth() / (float)SettingsStore.WINDOW_WIDTH_DEFAULT; float worldHeight = worldWidth / aAspect; float area = worldWidth * worldHeight * aScale; float targetWidth = (float) Math.sqrt(area * aAspect); float targetHeight = targetWidth / aAspect; return Pair.create(targetWidth, targetHeight); } private int getWindowWidth(float aWorldWidth) { return (int) Math.floor(SettingsStore.WINDOW_WIDTH_DEFAULT * aWorldWidth / WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width)); } private void showLibraryItemContextMenu(@NonNull View view, LibraryMenuWidget.LibraryContextMenuItem item, boolean isLastVisibleItem) { view.requestFocusFromTouch(); hideContextMenus(); float ratio = WidgetPlacement.viewToWidgetRatio(getContext(), WindowWidget.this); Rect offsetViewBounds = new Rect(); getDrawingRect(offsetViewBounds); offsetDescendantRectToMyCoords(view, offsetViewBounds); SessionStore.get().getBookmarkStore().isBookmarked(item.getUrl()).thenAcceptAsync((isBookmarked -> { mLibraryItemContextMenu = new LibraryMenuWidget(getContext(), item, mWidgetManager.canOpenNewWindow(), isBookmarked); mLibraryItemContextMenu.getPlacement().parentHandle = getHandle(); PointF position; if (isLastVisibleItem) { mLibraryItemContextMenu.mWidgetPlacement.anchorY = 0.0f; position = new PointF( (offsetViewBounds.left + view.getWidth()) * ratio, -(offsetViewBounds.top) * ratio); } else { mLibraryItemContextMenu.mWidgetPlacement.anchorY = 1.0f; position = new PointF( (offsetViewBounds.left + view.getWidth()) * ratio, -(offsetViewBounds.top + view.getHeight()) * ratio); } mLibraryItemContextMenu.mWidgetPlacement.translationX = position.x - (mLibraryItemContextMenu.getWidth()/mLibraryItemContextMenu.mWidgetPlacement.density); mLibraryItemContextMenu.mWidgetPlacement.translationY = position.y + getResources().getDimension(R.dimen.library_menu_top_margin)/mLibraryItemContextMenu.mWidgetPlacement.density; mLibraryItemContextMenu.setItemDelegate((new LibraryItemContextMenuClickCallback() { @Override public void onOpenInNewWindowClick(LibraryMenuWidget.LibraryContextMenuItem item) { mWidgetManager.openNewWindow(item.getUrl()); hideContextMenus(); } @Override public void onOpenInNewTabClick(LibraryMenuWidget.LibraryContextMenuItem item) { mWidgetManager.openNewTabForeground(item.getUrl()); if (item.getType() == LibraryMenuWidget.LibraryItemType.HISTORY) { GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.HISTORY); } else if (item.getType() == LibraryMenuWidget.LibraryItemType.BOOKMARKS) { GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.BOOKMARKS); } hideContextMenus(); } @Override public void onAddToBookmarks(LibraryMenuWidget.LibraryContextMenuItem item) { SessionStore.get().getBookmarkStore().addBookmark(item.getUrl(), item.getTitle()); hideContextMenus(); } @Override public void onRemoveFromBookmarks(LibraryMenuWidget.LibraryContextMenuItem item) { SessionStore.get().getBookmarkStore().deleteBookmarkByURL(item.getUrl()); hideContextMenus(); } })); mLibraryItemContextMenu.show(REQUEST_FOCUS); }), mUIThreadExecutor).exceptionally(throwable -> { Log.d(LOGTAG, "Error getting the bookmarked status: " + 
throwable.getLocalizedMessage()); throwable.printStackTrace(); return null; }); } private BookmarksCallback mBookmarksViewListener = new BookmarksCallback() { @Override public void onShowContextMenu(@NonNull View view, @NonNull Bookmark item, boolean isLastVisibleItem) { showLibraryItemContextMenu( view, new LibraryMenuWidget.LibraryContextMenuItem( item.getUrl(), item.getTitle(), LibraryMenuWidget.LibraryItemType.BOOKMARKS), isLastVisibleItem); } @Override public void onFxASynSettings(@NonNull View view) { mWidgetManager.getTray().showSettingsDialog(FXA); } @Override public void onHideContextMenu(@NonNull View view) { hideContextMenus(); } @Override public void onFxALogin(@NonNull View view) { hideBookmarks(); } @Override public void onClickItem(@NonNull View view, Bookmark item) { hideBookmarks(); } }; private HistoryCallback mHistoryListener = new HistoryCallback() { @Override public void onClearHistory(@NonNull View view) { view.requestFocusFromTouch(); showClearCacheDialog(); } @Override public void onShowContextMenu(@NonNull View view, @NonNull VisitInfo item, boolean isLastVisibleItem) { showLibraryItemContextMenu( view, new LibraryMenuWidget.LibraryContextMenuItem( item.getUrl(), item.getTitle(), LibraryMenuWidget.LibraryItemType.HISTORY), isLastVisibleItem); } @Override public void onFxASynSettings(@NonNull View view) { mWidgetManager.getTray().showSettingsDialog(FXA); } @Override public void onHideContextMenu(@NonNull View view) { hideContextMenus(); } @Override public void onFxALogin(@NonNull View view) { hideHistory(); } @Override public void onClickItem(@NonNull View view, @NonNull VisitInfo item) { hideHistory(); } }; private NavigationBarWidget.NavigationListener mNavigationBarListener = new NavigationBarWidget.NavigationListener() { @Override public void onBack() { hideLibraryPanels(); } @Override public void onForward() { hideLibraryPanels(); } @Override public void onReload() { hideLibraryPanels(); } @Override public void onStop() { // Nothing to do } @Override public void onHome() { hideLibraryPanels(); } }; private BookmarksStore.BookmarkListener mBookmarksListener = new BookmarksStore.BookmarkListener() { @Override public void onBookmarksUpdated() { updateBookmarked(); } @Override public void onBookmarkAdded() { updateBookmarked(); } }; private void updateBookmarked() { SessionStore.get().getBookmarkStore().isBookmarked(mViewModel.getUrl().getValue().toString()).thenAcceptAsync(bookmarked -> { if (bookmarked) { mViewModel.setIsBookmarked(true); } else { mViewModel.setIsBookmarked(false); } }, mUIThreadExecutor).exceptionally(throwable -> { Log.d(LOGTAG, "Error checking bookmark: " + throwable.getLocalizedMessage()); throwable.printStackTrace(); return null; }); } private void hideContextMenus() { if (mContextMenu != null) { if (!mContextMenu.isReleased()) { if (mContextMenu.isVisible()) { mContextMenu.hide(REMOVE_WIDGET); } mContextMenu.releaseWidget(); } mContextMenu = null; } if (mSelectionMenu != null) { mSelectionMenu.setDelegate((SelectionActionWidget.Delegate)null); if (!mSelectionMenu.isReleased()) { if (mSelectionMenu.isVisible()) { mSelectionMenu.hide(REMOVE_WIDGET); } mSelectionMenu.releaseWidget(); } mSelectionMenu = null; } if (mWidgetPlacement.tintColor != 0xFFFFFFFF) { mWidgetPlacement.tintColor = 0xFFFFFFFF; mWidgetManager.updateWidget(this); } if (mLibraryItemContextMenu != null && !mLibraryItemContextMenu.isReleased() && mLibraryItemContextMenu.isVisible()) { mLibraryItemContextMenu.hide(REMOVE_WIDGET); } } // GeckoSession.ContentDelegate @Override 
public void onContextMenu(GeckoSession session, int screenX, int screenY, ContextElement element) { if (element.type == ContextElement.TYPE_VIDEO) { return; } hideContextMenus(); mContextMenu = new ContextMenuWidget(getContext()); mContextMenu.mWidgetPlacement.parentHandle = getHandle(); mContextMenu.setDismissCallback(this::hideContextMenus); mContextMenu.setContextElement(element); mContextMenu.show(REQUEST_FOCUS); mWidgetPlacement.tintColor = 0x555555FF; mWidgetManager.updateWidget(this); } @Override public void onFirstComposite(@NonNull GeckoSession session) { if (!mAfterFirstPaint) { return; } if (mFirstDrawCallback != null) { mUIThreadExecutor.execute(mFirstDrawCallback); mFirstDrawCallback = null; } } @Override public void onFirstContentfulPaint(@NonNull GeckoSession session) { if (mAfterFirstPaint) { return; } if (mFirstDrawCallback != null) { mUIThreadExecutor.execute(mFirstDrawCallback); mFirstDrawCallback = null; mAfterFirstPaint = true; // view queue calls need to be delayed to avoid a deadlock // caused by GeckoSession.syncResumeResizeCompositor() // See: https://github.com/MozillaReality/FirefoxReality/issues/2889 mUIThreadExecutor.execute(() -> { mSetViewQueuedCalls.forEach(Runnable::run); mSetViewQueuedCalls.clear(); }); } } // VideoAvailabilityListener private Media mMedia; @Override public void onVideoAvailabilityChanged(boolean aVideosAvailable) { boolean mediaAvailable; if (mSession != null) { if (mMedia != null) { mMedia.removeMediaListener(mMediaDelegate); } mMedia = mSession.getFullScreenVideo(); if (aVideosAvailable && mMedia != null) { mMedia.addMediaListener(mMediaDelegate); mediaAvailable = true; } else { mediaAvailable = false; } } else { mediaAvailable = false; } if (mediaAvailable) { if (mSession.getFullScreenVideo().isPlayed()) { mViewModel.setIsMediaAvailable(true); mViewModel.setIsMediaPlaying(true); } } else { mMedia = null; mViewModel.setIsMediaAvailable(false); mViewModel.setIsMediaPlaying(false); } } MediaElement.Delegate mMediaDelegate = new MediaElement.Delegate() { @Override public void onPlaybackStateChange(@NonNull MediaElement mediaElement, int state) { switch(state) { case MediaElement.MEDIA_STATE_PLAY: case MediaElement.MEDIA_STATE_PLAYING: mViewModel.setIsMediaAvailable(true); mViewModel.setIsMediaPlaying(true); break; case MediaElement.MEDIA_STATE_PAUSE: mViewModel.setIsMediaAvailable(true); mViewModel.setIsMediaPlaying(false); break; case MediaElement.MEDIA_STATE_ABORT: case MediaElement.MEDIA_STATE_EMPTIED: mViewModel.setIsMediaAvailable(false); mViewModel.setIsMediaPlaying(false); } } }; // GeckoSession.NavigationDelegate @Override public void onPageStart(@NonNull GeckoSession geckoSession, @NonNull String aUri) { mCaptureOnPageStop = true; mViewModel.setUrl(aUri); mViewModel.setIsLoading(true); } @Override public void onPageStop(@NonNull GeckoSession aSession, boolean b) { if (mCaptureOnPageStop || !mSession.hasCapturedBitmap()) { mCaptureOnPageStop = false; captureImage(); } mViewModel.setIsLoading(false); } public void captureImage() { mSession.captureBitmap(); } @Override public void onLocationChange(@NonNull GeckoSession session, @Nullable String url) { mViewModel.setUrl(url); if (StringUtils.isEmpty(url)) { mViewModel.setIsBookmarked(false); } else { SessionStore.get().getBookmarkStore().isBookmarked(url).thenAcceptAsync(aBoolean -> mViewModel.setIsBookmarked(aBoolean), mUIThreadExecutor).exceptionally(throwable -> { throwable.printStackTrace(); return null; }); } } @Override public void onCanGoBack(@NonNull GeckoSession geckoSession, 
boolean canGoBack) { mViewModel.setCanGoBack(canGoBack); } @Override public void onCanGoForward(@NonNull GeckoSession geckoSession, boolean canGoForward) { mViewModel.setCanGoForward(canGoForward); } @Override public @Nullable GeckoResult<AllowOrDeny> onLoadRequest(GeckoSession aSession, @NonNull LoadRequest aRequest) { final GeckoResult<AllowOrDeny> result = new GeckoResult<>(); Uri uri = Uri.parse(aRequest.uri); if (UrlUtils.isAboutPage(uri.toString())) { if(UrlUtils.isBookmarksUrl(uri.toString())) { showBookmarks(); } else if (UrlUtils.isHistoryUrl(uri.toString())) { showHistory(); } else { hideLibraryPanels(); } } else { hideLibraryPanels(); } if ("file".equalsIgnoreCase(uri.getScheme()) && !mWidgetManager.isPermissionGranted(android.Manifest.permission.READ_EXTERNAL_STORAGE)) { mWidgetManager.requestPermission( aRequest.uri, android.Manifest.permission.READ_EXTERNAL_STORAGE, new GeckoSession.PermissionDelegate.Callback() { @Override public void grant() { result.complete(AllowOrDeny.ALLOW); } @Override public void reject() { result.complete(AllowOrDeny.DENY); } }); return result; } result.complete(AllowOrDeny.ALLOW); return result; } // GeckoSession.HistoryDelegate @Override public void onHistoryStateChange(@NonNull GeckoSession geckoSession, @NonNull HistoryList historyList) { if (!mSession.isPrivateMode()) { for (HistoryItem item : historyList) { SessionStore.get().getHistoryStore().recordObservation(item.getUri(), new PageObservation(item.getTitle())); } } } @Nullable @Override public GeckoResult<Boolean> onVisited(@NonNull GeckoSession geckoSession, @NonNull String url, @Nullable String lastVisitedURL, int flags) { if (mSession.isPrivateMode() || (flags & VISIT_TOP_LEVEL) == 0 || (flags & VISIT_UNRECOVERABLE_ERROR) != 0) { return GeckoResult.fromValue(false); } // Check if we want this type of url. if (!shouldStoreUri(url)) { return GeckoResult.fromValue(false); } boolean isReload = lastVisitedURL != null && lastVisitedURL.equals(url); VisitType visitType; if (isReload) { visitType = VisitType.RELOAD; } else { // Note the difference between `VISIT_REDIRECT_PERMANENT`, // `VISIT_REDIRECT_TEMPORARY`, `VISIT_REDIRECT_SOURCE`, and // `VISIT_REDIRECT_SOURCE_PERMANENT`. // // The former two indicate if the visited page is the *target* // of a redirect; that is, another page redirected to it. // // The latter two indicate if the visited page is the *source* // of a redirect: it's redirecting to another page, because the // server returned an HTTP 3xy status code. if ((flags & VISIT_REDIRECT_PERMANENT) != 0) { visitType = VisitType.REDIRECT_PERMANENT; } else if ((flags & VISIT_REDIRECT_TEMPORARY) != 0) { visitType = VisitType.REDIRECT_TEMPORARY; } else { visitType = VisitType.LINK; } } RedirectSource redirectSource; if ((flags & GeckoSession.HistoryDelegate.VISIT_REDIRECT_SOURCE_PERMANENT) != 0) { redirectSource = RedirectSource.PERMANENT; } else if ((flags & GeckoSession.HistoryDelegate.VISIT_REDIRECT_SOURCE) != 0) { redirectSource = RedirectSource.TEMPORARY; } else { redirectSource = RedirectSource.NOT_A_SOURCE; } SessionStore.get().getHistoryStore().recordVisit(url, new PageVisit(visitType, redirectSource)); SessionStore.get().getHistoryStore().recordObservation(url, new PageObservation(url)); return GeckoResult.fromValue(true); } /** * Filter out unwanted URIs, such as "chrome:", "about:", etc. 
* Ported from nsAndroidHistory::CanAddURI * See https://dxr.mozilla.org/mozilla-central/source/mobile/android/components/build/nsAndroidHistory.cpp#326 */ private boolean shouldStoreUri(@NonNull String uri) { Uri parsedUri = Uri.parse(uri); String scheme = parsedUri.getScheme(); if (scheme == null) { return false; } // Short-circuit most common schemes. if (scheme.equals("http") || scheme.equals("https")) { return true; } // Allow about about:reader uris. They are of the form: // about:reader?url=http://some.interesting.page/to/read.html if (uri.startsWith("about:reader")) { return true; } List<String> schemasToIgnore = Stream.of( "about", "imap", "news", "mailbox", "moz-anno", "moz-extension", "view-source", "chrome", "resource", "data", "javascript", "blob" ).collect(Collectors.toList()); return !schemasToIgnore.contains(scheme); } @UiThread @Nullable public GeckoResult<boolean[]> getVisited(@NonNull GeckoSession geckoSession, @NonNull String[] urls) { if (mSession.isPrivateMode()) { return GeckoResult.fromValue(new boolean[]{}); } GeckoResult<boolean[]> result = new GeckoResult<>(); SessionStore.get().getHistoryStore().getVisited(Arrays.asList(urls)).thenAcceptAsync(list -> { final boolean[] primitives = new boolean[list.size()]; int index = 0; for (Boolean object : list) { primitives[index++] = object; } result.complete(primitives); }, mUIThreadExecutor).exceptionally(throwable -> { Log.d(LOGTAG, "Error getting history: " + throwable.getLocalizedMessage()); throwable.printStackTrace(); return null; }); return result; } // GeckoSession.ProgressDelegate @Override public void onSecurityChange(GeckoSession geckoSession, SecurityInformation securityInformation) { mViewModel.setIsInsecure(!securityInformation.isSecure); } // GeckoSession.SelectionActionDelegate @Override public void onShowActionRequest(@NonNull GeckoSession aSession, @NonNull Selection aSelection) { if (aSelection.availableActions.size() == 1 && (aSelection.availableActions.contains(GeckoSession.SelectionActionDelegate.ACTION_HIDE))) { // See: https://github.com/MozillaReality/FirefoxReality/issues/2214 aSelection.hide(); return; } hideContextMenus(); mSelectionMenu = new SelectionActionWidget(getContext()); mSelectionMenu.mWidgetPlacement.parentHandle = getHandle(); mSelectionMenu.setActions(aSelection.availableActions); Matrix matrix = new Matrix(); aSession.getClientToSurfaceMatrix(matrix); matrix.mapRect(aSelection.clientRect); RectF selectionRect = null; if (aSelection.clientRect != null) { float ratio = WidgetPlacement.worldToWindowRatio(getContext()); selectionRect = new RectF( aSelection.clientRect.left * ratio, aSelection.clientRect.top* ratio, aSelection.clientRect.right * ratio, aSelection.clientRect.bottom * ratio ); } mSelectionMenu.setSelectionRect(selectionRect); mSelectionMenu.setDelegate(new SelectionActionWidget.Delegate() { @Override public void onAction(String action) { hideContextMenus(); if (aSelection.isActionAvailable(action)) { aSelection.execute(action); } if (GeckoSession.SelectionActionDelegate.ACTION_COPY.equals(action) && aSelection.isActionAvailable(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT)) { // Don't keep the text selected after it's copied. 
aSelection.execute(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT); } } @Override public void onDismiss() { if (aSelection.isActionAvailable(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT)) { aSelection.unselect(); } else if (aSelection.isActionAvailable(GeckoSession.SelectionActionDelegate.ACTION_COLLAPSE_TO_END)) { aSelection.collapseToEnd() ; } aSelection.hide(); } }); mSelectionMenu.show(KEEP_FOCUS); } @Override public void onHideAction(@NonNull GeckoSession aSession, int aHideReason) { hideContextMenus(); } }
1
9,093
I wonder if we can just check if the mSession is active and then only call `setActive(true)` and `callSurfaceChanged()` if it isn't?
MozillaReality-FirefoxReality
java
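A minimal sketch of what the review message above appears to suggest for the `setVisible` body in the Java file of this record: guard the `setActive(true)` / `callSurfaceChanged()` pair so it only runs when the session is not already active, rather than on every visibility change. This is an illustration only; `Session.isActive()` is an assumed accessor and the surrounding field names are taken from the flattened file, not from a confirmed patch.

```java
// Hypothetical rewrite of the visibility handling the reviewer is questioning.
// Assumption: mSession exposes an isActive() accessor; the real class may differ.
if (!mIsInVRVideoMode) {
    if (aVisible && !mSession.isActive()) {
        // Only reactivate and refresh the surface when the session was inactive,
        // avoiding a redundant setActive(true)/callSurfaceChanged() round trip.
        mSession.setActive(true);
        callSurfaceChanged();
    } else if (!aVisible) {
        mSession.setActive(false);
    }
}
```

The intent of the guard is simply to skip work that the existing code repeats unconditionally; whether the extra call is actually harmful is not established by this record.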
@@ -1336,6 +1336,8 @@ SDDkwd__(EXE_DIAGNOSTIC_EVENTS, "OFF"), SDDui___(EXE_MEMORY_FOR_PROBE_CACHE_IN_MB, "100"), + SDDui___(EXE_MEMORY_FOR_UNPACK_ROWS_IN_MB, "100"), + // lower-bound memory limit for BMOs/nbmos (in MB) DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN, "10"),
1
/* -*-C++-*- // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ ***************************************************************************** * * File: NADefaults.cpp * Description: Implementation for the defaults table class, NADefaults. * * Created: 7/11/96 * Language: C++ * * * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include "NADefaults.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef NA_HAS_SEARCH_H #include <search.h> // use the bsearch binary search routine of the C RTL #else #include <unistd.h> // on OSS, bsearch comes from unistd.h #endif #include "nsk/nskport.h" #if !defined(NDEBUG) #endif #include "CliDefs.h" #include "CmpContext.h" #include "CmpErrors.h" #include "ComObjectName.h" #include "ComRtUtils.h" #include "ComSchemaName.h" #include "ex_error.h" #include "DefaultConstants.h" #include "DefaultValidator.h" #include "NAClusterInfo.h" #include "parser.h" #include "sql_id.h" #include "SQLCLIdev.h" #include "Sqlcomp.h" #include "StmtCompilationMode.h" #include "OptimizerSimulator.h" #include "CmpSeabaseDDL.h" #include "Globals.h" #include "QCache.h" #include "SqlParserGlobals.h" // MUST be last #include! #include "seabed/ms.h" #include "seabed/fs.h" #define NADHEAP CTXTHEAP #define ERRWARN(msg) ToErrorOrWarning(msg, errOrWarn) #define ERRWARNLOOP(msg) ToErrorOrWarning(msg, errOrWarnLOOP) #define ENUM_RANGE_CHECK(e) (e >= 0 && (size_t)e < numDefaultAttributes()) #define ATTR_RANGE_CHECK ENUM_RANGE_CHECK(attrEnum) #ifndef NDEBUG #define ATTR_RANGE_ASSERT CMPASSERT(ATTR_RANGE_CHECK) #else #define ATTR_RANGE_ASSERT #endif // ------------------------------------------------------------------------- // This table contains defaults used in SQLARK. // To add a default, put it in sqlcomp/DefaultConstants.h and in this table. // // The #define declares the domain (allowed range of values) of the attr-value; // typically it is Int1 or UI1 (signed or unsigned integral, >=1) // to prevent division-by-zero errors in the calling code. // // The first column is the internal enum value from sqlcomp/DefaultConstants.h. // The second column is the default value as a string. // // The DDxxxx macro identifies the domain of the attribute // (the range and properties of the possible values). // // XDDxxxx does the same *and* externalizes the attribute // (makes it visible to SHOWCONTROL; *you* need to tell Pubs to document it). // // SDDxxxx does the same and externalizes the attribute to HP support personnel // (makes it visible to HPDM when support is logged on; *you* need to tell Pubs // to document it in the support manual. 
You can set the // SHOWCONTROL_SUPPORT_ATTRS CQD to ON to see all the externalized and // support-level CQDs). // // For instance, DDflt0 allows any nonnegative floating-point number, while // DDflte allows any positive float (the e stands for epsilon, that tiniest // scintilla >0 in classical calculus, and something like +1E-38 on a Pentium). // DDui allows only nonnegative integral values (ui=unsigned int), // DDui1 allows only ints > 0, DDui2 only nonzero multiples of 2, etc. // // DDkwd validates keywords. Each attribute that is DDkwd has its own subset // of acceptable tokens -- the default behavior is that the attr is bivalent // (ON/OFF or TRUE/FALSE or ENABLE/DISABLE). If you want different keywords, // see enum DefaultToken in DefaultConstants.h, and NADefaults::token() below. // // Other DD's validate percentages, and Ansi names. Certainly more could be // defined, for more restrictive ranges or other criteria. // ************************************************************************* // NOTE: You must keep the entire list in alphabetical order, // or else the lookup will not work!!!!!!! Use only CAPITAL LETTERS!!!!!!!!! // ************************************************************************* // NOTE 2: If you choose to "hide" the default default value by setting it to // "ENABLE" or "SYSTEM" or "", your code must handle this possibility. // // See OptPhysRelExpr.cpp's handling of PARALLEL_NUM_ESPS, // an unsigned positive int which also accepts the keyword setting of "SYSTEM". // See ImplRule.cpp's use of INSERT_VSBB, a keyword attr which allows "SYSTEM". // // A simple way to handle ON/OFF keywords that you want to hide the default for: // Take OPTIMIZER_PRUNING as an example. Right now, it appears below with // default "OFF", and opt.cpp does // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) == DF_OFF); // To hide the default default, // you would enter it below as "SYSTEM", and opt.cpp would do // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) != DF_ON); // (i.e., DF_OFF and DF_SYSTEM would be treated identically, as desired). // ************************************************************************* // NOTE 3: The user is always allowed to say // CONTROL QUERY DEFAULT attrname 'SYSTEM'; -- or 'ENABLE' or '' // What this means is that the current setting for that attribute // reverts to its default-default value. This default-default value // may or may not be "SYSTEM"; this is completely orthogonal/irrelevant // to the CQD usage. // // One gotcha: 'ENABLE' is a synonym for 'SYSTEM', *EXCEPT* when the // SYSTEM default (the default-default) is "DISABLE". // In this case, 'ENABLE' is a synonym for 'ON' // (the opposite of the synonyms DISABLE/OFF). // ************************************************************************* // NOTE 4: After modifying this static table in any way, INCLUDING A CODE MERGE, // for a quick sanity check, run w:/toolbin/checkNAD. // For a complete consistency check, compile this file, link arkcmp, and // runregr TEST050. 
// ************************************************************************* struct DefaultDefault { enum DefaultConstants attrEnum; const char *attrName; const char *value; const DefaultValidator *validator; UInt32 flags; }; #define DD(name,value,validator) { name, "" # name "", value, validator } #define FDD(name,value,validator,flags) { name, "" # name "", value, validator, flags } #define XDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_EXTERNALIZED) #define SDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_FOR_SUPPORT) #define DDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD) #define XDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_EXTERNALIZED) #define SDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_FOR_SUPPORT) #define DD_____(name,value) DD(name,value,&validateUnknown) #define XDD_____(name,value) XDD(name,value,&validateUnknown) #define SDD_____(name,value) SDD(name,value,&validateUnknown) #define DDS_____(name,value) DDS(name,value,&validateUnknown) #define XDDS_____(name,value) XDDS(name,value,&validateUnknown) #define DDansi_(name,value) DD(name,value,&validateAnsiName) #define XDDansi_(name,value) XDD(name,value,&validateAnsiName) #define DDcoll_(name,value) DD(name,value,&validateCollList) #define DDint__(name,value) DD(name,value,&validateInt) #define SDDint__(name,value) SDD(name,value,&validateInt) #define XDDint__(name,value) XDD(name,value,&validateInt) #define DDSint__(name,value) DDS(name,value,&validateInt) #define XDDSint__(name,value) XDDS(name,value,&validateInt) #define XDDintN2(name,value) XDD(name,value,&validateIntNeg2) #define DDintN1__(name,value) DD(name,value,&validateIntNeg1) #define DDpct__(name,value) DD(name,value,&validatePct) #define XDDpct__(name,value) XDD(name,value,&validatePct) #define SDDpct__(name,value) SDD(name,value,&validatePct) #define DDpct1_50(name,value) DD(name,value,&validatePct1_t50) #define DD0_10485760(name,value) DD(name,value,&validate0_10485760) #define DD0_255(name,value) DD(name,value,&validate0_255) #define DD0_200000(name,value) DD(name,value,&validate0_200000) #define XDD0_200000(name,value) XDD(name,value,&validate0_200000) #define DD1_200000(name,value) DD(name,value,&validate1_200000) #define XDDui30_32000(name,value) XDD(name,value,&validate30_32000) #define DDui30_246(name,value) DD(name,value,&validate30_246) #define DDui50_4194303(name,value) DD(name,value,&validate50_4194303) #define DD1_24(name,value) DD(name,value,&validate1_24) #define XDD1_1024(name,value) XDD(name,value,&validate1_1024) #define DD1_1024(name,value) DD(name,value,&validate1_1024) #define DD18_128(name,value) DD(name,value,&validate18_128) #define DD1_128(name,value) DD(name,value,&validate1_128) #define DDui___(name,value) DD(name,value,&validateUI) #define XDDui___(name,value) XDD(name,value,&validateUI) #define SDDui___(name,value) SDD(name,value,&validateUI) #define DDui1__(name,value) DD(name,value,&validateUI1) #define XDDui1__(name,value) XDD(name,value,&validateUI1) #define SDDui1__(name,value) SDD(name,value,&validateUI1) #define DDui2__(name,value) DD(name,value,&validateUI2) #define XDDui2__(name,value) XDD(name,value,&validateUI2) #define DDui8__(name,value) DD(name,value,&validateUI8) #define DDui512(name,value) DD(name,value,&validateUI512) #define DDui0_5(name,value) DD(name,value,&validateUIntFrom0To5) #define XDDui0_5(name,value) XDD(name,value,&validateUIntFrom0To5) #define DDui1_6(name,value) 
DD(name,value,&validateUIntFrom1To6) #define DDui1_8(name,value) DD(name,value,&validateUIntFrom1To8) #define DDui1_10(name,value) DD(name,value,&validateUIntFrom1To10) #define DDui2_10(name,value) DD(name,value,&validateUIntFrom2To10) #define DDui1500_4000(name,value) DD(name,value,&validateUIntFrom1500To4000) #define DDipcBu(name,value) DD(name,value,&validateIPCBuf) #define XDDipcBu(name,value) XDD(name,value,&validateIPCBuf) #define DDflt__(name,value) DD(name,value,&validateFlt) #define XDDflt__(name,value) XDD(name,value,&validateFlt) #define SDDflt__(name,value) SDD(name,value,&validateFlt) #define DDflt0_(name,value) DD(name,value,&validateFlt0) #define XDDflt0_(name,value) XDD(name,value,&validateFlt0) #define SDDflt0_(name,value) SDD(name,value,&validateFlt0) #define DDflte_(name,value) DD(name,value,&validateFltE) #define XDDflte_(name,value) XDD(name,value,&validateFltE) #define SDDflte_(name,value) SDD(name,value,&validateFltE) #define DDflt1_(name,value) DD(name,value,&validateFlt1) #define XDDflt1_(name,value) XDD(name,value,&validateFlt1) #define DDflt_0_1(name,value) DD(name,value,&validateFlt_0_1) #define XDDflt_0_1(name,value) XDD(name,value,&validateFlt_0_1) #define DDkwd__(name,value) DD(name,value,&validateKwd) #define XDDkwd__(name,value) XDD(name,value,&validateKwd) #define SDDkwd__(name,value) SDD(name,value,&validateKwd) #define DDSkwd__(name,value) DDS(name,value,&validateKwd) #define SDDSkwd__(name,value) SDDS(name,value,&validateKwd) #define DD1_4096(name,value) DD(name,value,&validate1_4096) #define DD0_18(name,value) DD(name,value,&validate0_18) #define DD0_64(name,value) DD(name,value,&validate0_64) #define DD16_64(name,value) DD(name,value,&validate16_64) #define DDalis_(name,value) DD(name,value,&validateAnsiList) #define XDDalis_(name,value) XDD(name,value,&validateAnsiList) #define XDDpos__(name,value) XDD(name,value,&validatePOSTableSizes) #define SDDpos__(name,value) SDD(name,value,&validatePOSTableSizes) #define DDpos__(name,value) DD(name,value,&validatePOSTableSizes) #define DDtp___(name,value) DD(name,value,&validateTraceStr) #define DDosch_(name,value) DD(name,value,&validateOverrideSchema) #define SDDosch_(name,value) SDD(name,value,&validateOverrideSchema) #define DDpsch_(name,value) DD(name,value,&validatePublicSchema) #define SDDpsch_(name,value) SDD(name,value,&validatePublicSchema) #define DDrlis_(name,value) DD(name,value,&validateRoleNameList) #define XDDrlis_(name,value) XDD(name,value,&validateRoleNameList) #define DDrver_(name,value) DD(name,value,&validateReplIoVersion) #define XDDMVA__(name,value) XDD(name,value,&validateMVAge) #define DDusht_(name,value) DD(name,value,&validate_uint16) const DefaultValidator validateUnknown; const DefaultValidator validateAnsiName(CASE_SENSITIVE_ANSI); // e.g. 
'c.s.tbl' ValidateCollationList validateCollList(TRUE/*mp-format*/); // list collations const ValidateInt validateInt; // allows neg, zero, pos ints const ValidateIntNeg1 validateIntNeg1;// allows -1 to +infinity ints const ValidateIntNeg1 validateIntNeg2;// allows -1 to +infinity ints const ValidatePercent validatePct; // allows zero to 100 (integral %age) const ValidateNumericRange validatePct1_t50(VALID_UINT, 1, (float)50);// allows 1 to 50 (integral %age) const Validate_0_10485760 validate0_10485760; // allows zero to 10Meg (integer) const Validate_0_255 validate0_255; // allows zero to 255 (integer) const Validate_0_200000 validate0_200000; // allows zero to 200000 (integer) const Validate_1_200000 validate1_200000; // allows 1 to 200000 (integer) const Validate_30_32000 validate30_32000; // allows 30 to 32000 const Validate_30_246 validate30_246; // allows 30 to 246 const Validate_50_4194303 validate50_4194303; // allows 50 to 4194303 (integer) const Validate_1_24 validate1_24; // allows 1 to 24 (integer) const ValidateUInt validateUI; // allows zero and pos const ValidateUInt1 validateUI1; // allows pos only (>= 1) const ValidateUInt2 validateUI2(2); // allows pos multiples of 2 only const ValidateUInt2 validateUI8(8); // pos multiples of 8 only const ValidateUInt2 validateUI512(512); // pos multiples of 512 only const ValidateUIntFrom0To5 validateUIntFrom0To5; // integer from 0 to 5 const ValidateUIntFrom1500To4000 validateUIntFrom1500To4000; // integer from 1 to 6 const ValidateUIntFrom1To6 validateUIntFrom1To6; // integer from 1 to 6 const ValidateUIntFrom1To8 validateUIntFrom1To8; // integer from 1 to 8 const ValidateUIntFrom1To10 validateUIntFrom1To10; // integer from 1 to 10 const ValidateUIntFrom2To10 validateUIntFrom2To10; // integer from 2 to 10 const ValidateIPCBuf validateIPCBuf; // for IPC message buffers (DP2 msgs) const ValidateFlt validateFlt; // allows neg, zero, pos (all nums) const ValidateFltMin0 validateFlt0; // allows zero and pos const ValidateFltMinEpsilon validateFltE; // allows pos only (>= epsilon > 0) const ValidateFltMin1 validateFlt1; // allows pos only (>= 1) const ValidateSelectivity ValidateSelectivity; // allows 0 to 1 (float) const ValidateFlt_0_1 validateFlt_0_1; // allows 0 to 1 (float) const ValidateKeyword validateKwd; // allows relevant keywords only const Validate_1_4096 validate1_4096; // allows 1 to 4096 (integer) which is max character size supported. const Validate_0_18 validate0_18; // allows 0 to 18 (integer) because 18 is max precision supported. const Validate_1_1024 validate1_1024; // allows 1 to 1024 (integer). const Validate_0_64 validate0_64; // allows 0 to 64 (integer) const Validate_16_64 validate16_64; // allows 16 to 64 (integer) const Validate_18_128 validate18_128; // allows 18 to 128 (integer). const Validate_1_128 validate1_128; // allows 1 to 128 (integer). // allows ':' separated list of three part ANSI names const ValidateAnsiList validateAnsiList; // allows ',' separated list of role names const ValidateRoleNameList validateRoleNameList; const ValidatePOSTableSizes validatePOSTableSizes; const ValidateTraceStr validateTraceStr; const ValidateOverrideSchema validateOverrideSchema; // check OverrideSchema format const ValidatePublicSchema validatePublicSchema; // This high value should be same as default value of REPLICATE_IO_VERSION const ValidateReplIoVersion validateReplIoVersion(11,17); const ValidateMVAge validateMVAge; const Validate_uint16 validate_uint16; // See the NOTEs above for how to maintain this list! 
THREAD_P DefaultDefault defaultDefaults[] = { DDflt0_(ACCEPTABLE_INPUTESTLOGPROP_ERROR, "0.5"), SDDint__(AFFINITY_VALUE, "-2"), // controls the ESP allocation per core. DDkwd__(AGGRESSIVE_ESP_ALLOCATION_PER_CORE, "OFF"), SDDkwd__(ALLOW_AUDIT_ATTRIBUTE_CHANGE, "FALSE"), // Used to control if row sampling will use the sample operator in SQL/MX or the // this should be used for testing only. DML should not be executed on // non-audited tables DDkwd__(ALLOW_DML_ON_NONAUDITED_TABLE, "OFF"), // DP2_EXECUTOR_POSITION_SAMPLE method in DP2. // Valid values are ON, OFF and SYSTEM // ON => choose DP2_ROW_SAMPLING over row sampling in EID, if sampling % is less than 50. // OFF => choose EID row sampling over DP2 row sampling regardless of sampling % // SYSTEM => update stats will choose DP row sampling if sampling % is less than 5. SDDkwd__(ALLOW_DP2_ROW_SAMPLING, "SYSTEM"), DDkwd__(ALLOW_FIRSTN_IN_SUBQUERIES, "TRUE"), // ON/OFF flag to invoke ghost objects from non-licensed process (non-super.super user) who can not use parserflags DDkwd__(ALLOW_GHOST_OBJECTS, "OFF"), // This default, if set to ON, will allow Translate nodes (to/from UCS2) // to be automatically inserted by the Binder if some children of an // ItemExpr are declared as UCS2 and some are declared as ISO88591. DDkwd__(ALLOW_IMPLICIT_CHAR_CASTING, "ON"), // this default, if set to ON, will allow certain incompatible // assignment, like string to int. The assignment will be done by // implicitely CASTing one operand to another as long as CAST between // the two is supported. See binder for details. DDkwd__(ALLOW_INCOMPATIBLE_ASSIGNMENT, "ON"), // this default, if set to ON, will allow certain incompatible // comparisons, like string to int. The comparison will be done by // implicitely CASTing one operand to another as long as CAST between // the two is supported. See binder for details. DDkwd__(ALLOW_INCOMPATIBLE_COMPARISON, "ON"), // this default, if set to ON, will allow certain incompatible // comparisons. This includes incompatible comparisons, assignments, // conversions, UNION, arith, string and case stmts. // See binder(BindItemExpr.cpp, SynthType.cpp) for details. DDkwd__(ALLOW_INCOMPATIBLE_OPERATIONS, "ON"), // if set to 2, the replicateNonKeyVEGPred() mdamkey method // will try to use inputs to filter out VEG elements that are not // local to the associated table to minimize predicate replication. // It is defaulted to 0 (off), as there is some concern that this algoritm // might produce to few replications, which could lead to incorrect results. // Setting the Value to 1 will try a simpler optimization DDui___(ALLOW_INPUT_PRED_REPLICATION_REDUCTION,"0"), // if set to ON, then isolation level (read committed, etc) could be // specified in a regular CREATE VIEW (not a create MV) statement. DDkwd__(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW, "ON"), // if set to ON, then we allow subqueries of degree > 1 in the // select list. DDkwd__(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST, "SYSTEM"), // by default, a primary key or unique constraint must be non-nullable. // This default, if set, allows them to be nullable. // The default value is OFF. DDkwd__(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, "OFF"), // if set to ON, then ORDER BY could be // specified in a regular CREATE VIEW (not a create MV) statement. 
DDkwd__(ALLOW_ORDER_BY_IN_CREATE_VIEW, "ON"), DDkwd__(ALLOW_ORDER_BY_IN_SUBQUERIES, "ON"), // rand() function in sql is disabled unless this CQD is turned on DDkwd__(ALLOW_RAND_FUNCTION, "ON"), DDkwd__(ALLOW_RANGE_PARTITIONING, "TRUE"), DDkwd__(ALLOW_RENAME_OF_MVF_OR_SUBQ, "OFF"), DDkwd__(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK, "OFF"), DDkwd__(ALLOW_SUBQ_IN_SET, "SYSTEM"), DDkwd__(ALLOW_UNEXTERNALIZED_MAINTAIN_OPTIONS, "OFF"), DDSkwd__(ALTPRI_ESP, ""), DDSkwd__(ALTPRI_MASTER, ""), DDS_____(AQR_ENTRIES, ""), DDkwd__(AQR_WNR, "ON"), DDkwd__(AQR_WNR_DELETE_NO_ROWCOUNT, "OFF"), DDkwd__(AQR_WNR_EXPLAIN_INSERT, "OFF"), DDkwd__(AQR_WNR_INSERT_CLEANUP, "OFF"), DDkwd__(AQR_WNR_LOCK_INSERT_TARGET, "OFF"), DDkwd__(ARKCMP_FAKE_HW, "OFF"), DDkwd__(ASG_FEATURE, "ON"), // Set ASM cache DDkwd__(ASM_ALLOWED, "ON"), // Precompute statistics in ASM DDkwd__(ASM_PRECOMPUTE, "OFF"), DDkwd__(ASYMMETRIC_JOIN_TRANSFORMATION, "MAXIMUM"), DDkwd__(ATTEMPT_ASYNCHRONOUS_ACCESS, "ON"), DDkwd__(ATTEMPT_ESP_PARALLELISM, "ON"), DDkwd__(ATTEMPT_REVERSE_SYNCHRONOUS_ORDER, "ON"), DDkwd__(AUTOMATIC_RECOMPILATION, "OFF"), DDkwd__(AUTO_QUERY_RETRY, "SYSTEM"), XDDkwd__(AUTO_QUERY_RETRY_WARNINGS, "OFF"), DDkwd__(BASE_NUM_PAS_ON_ACTIVE_PARTS, "OFF"), // see comments in DefaultConstants.h DDkwd__(BIGNUM_IO, "SYSTEM"), DDint__(BLOCK_ENCRYPTION_MODE, "0"), XDDkwd__(BLOCK_TO_PREVENT_HALLOWEEN, "ON"), DDflte_(BMO_CITIZENSHIP_FACTOR, "1."), DDflte_(BMO_MEMORY_EQUAL_QUOTA_SHARE_RATIO, "0.5"), DDflte_(BMO_MEMORY_ESTIMATE_RATIO_CAP, "0.7"), DDui___(BMO_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY , "25"), DDui___(BMO_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN, "25"), DDui___(BMO_MEMORY_LIMIT_LOWER_BOUND_SORT , "200"), XDDui___(BMO_MEMORY_LIMIT_PER_NODE_IN_MB, "10240"), DDui___(BMO_MEMORY_LIMIT_UPPER_BOUND, "1200"), DDui1__(BMO_MEMORY_SIZE, "204800"), // percentage of physical main memory availabe for BMO. // This value is only used by HJ and HGB to come up with // an initial estimate for the number of clusters to allocate. // It does NOT by any means determine the amount of memory // used by a BMO. The memory usage depends on the amount of // memory available during execution and the amount of input // data. DDflte_(BMO_MEMORY_USAGE_PERCENT, "5."), // When on, then try to bulk move nullable and variable length column values. 
DDkwd__(BULK_MOVE_NULL_VARCHAR, "ON"), //Temporary fix to bypass volatile schema name checking for non-table objects - ALM Case#4764 DDkwd__(BYPASS_CHECK_FOR_VOLATILE_SCHEMA_NAME, "OFF"), DDkwd__(CACHE_HISTOGRAMS, "ON"), DDkwd__(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS, "OFF"), DD0_200000(CACHE_HISTOGRAMS_IN_KB, "32768"), DDkwd__(CACHE_HISTOGRAMS_MONITOR_HIST_DETAIL, "OFF"), DDkwd__(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL, "OFF"), DD_____(CACHE_HISTOGRAMS_MONITOR_OUTPUT_FILE, ""), DD_____(CACHE_HISTOGRAMS_TRACE_OUTPUT_FILE, ""), DDkwd__(CALL_EMBEDDED_ARKCMP, "OFF"), DDui___(CANCEL_MINIMUM_BLOCKING_INTERVAL, "60"), DDkwd__(CASCADED_GROUPBY_TRANSFORMATION, "ON"), XDDansi_(CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(CAT_ALLOW_NEW_FEATUREX, "OFF"), // Control whether authorization caches immutable users DDkwd__(CAT_AUTHORIZATION_CACHE_IMMUTABLE_USERS, "ON"), DDkwd__(CAT_CREATE_SCHEMA_LABELS_ON_ALL_SEGMENTS, "ON"), DDkwd__(CAT_DEFAULT_COMPRESSION, "NONE"), // Metadata table distribution schemes // OFF - Place all metadata tables on one single disk // LOCAL_NODE - Distribute metadata tables across disks on local segment // where first schema in the catalog is created // ON - Distribute metadata tables across disks in local segment // and visible remote segments SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), //SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), // This disables Query Invalidation processing in catman when set to "OFF" SDDkwd__(CAT_ENABLE_QUERY_INVALIDATION, "ON"), // Throw an error if a column is part of the store by clause and // is not defined as NOT NULL return an error DDkwd__(CAT_ERROR_ON_NOTNULL_STOREBY, "ON"), DDui1__(CAT_FS_TIMEOUT, "9000"), // Used to make ignore "already exists" error in Create and // "does not exist" error in Drop. DDkwd__(CAT_IGNORE_ALREADY_EXISTS_ERROR, "OFF"), DDkwd__(CAT_IGNORE_DOES_NOT_EXIST_ERROR, "OFF"), // Used to make catman test134 predictable DDkwd__(CAT_IGNORE_EMPTY_CATALOGS, "OFF"), // Catalog Manager internal support for REPLICATE AUTHORIZATION DDkwd__(CAT_IGNORE_REPL_AUTHIDS_ERROR, "OFF"), // This enables the DB Limits functionality. If set to OFF, then blocksize // is restricted to 4096 and clustering key size is limited to 255 bytes. // DB Limits checking is turned off on NT since NT's DP2 does not support // large blocks or keys. DDkwd__(CAT_LARGE_BLOCKS_LARGE_KEYS, "ON"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's clustering key length is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_KEYSIZE, "1"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's row size is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_ROWSIZE, "1"), // Controls how pathnames for routines/procedures/SPJs are interpreted DDkwd__(CAT_LIBRARY_PATH_RELATIVE, "OFF"), DDkwd__(CAT_MORE_SCHEMA_PRIVS, "ON"), DDkwd__(CAT_OVERRIDE_CREATE_DISABLE, "OFF"), // This forces an rcb to be created with a different version number // A "0" means to take the current mxv version DDui___(CAT_RCB_VERSION, "0"), // Controls creation of column privileges for object-level privileges DDkwd__(CAT_REDUNDANT_COLUMN_PRIVS, "ON"), // If schema owner is object owner is ON, then the default owner for objects is the // schema owner. DDkwd__(CAT_SCHEMA_OWNER_IS_OBJECT_OWNER, "OFF"), DDkwd__(CAT_TEST_BOOL, "OFF"), DDint__(CAT_TEST_POINT, "0"), DD_____(CAT_TEST_STRING, "NONE"), // CMP_ERR_LOG_FILE indicates where to save a log for certain errors. 
DD_____(CMP_ERR_LOG_FILE, "tdm_arkcmp_errors.log"), DDkwd__(COLLECT_REORG_STATS, "ON"), DDint__(COMPILER_IDLE_TIMEOUT, "1800"), // To match with set session defaults value // tracking compilers specific defaults DDint__(COMPILER_TRACKING_INTERVAL, "0"), DD_____(COMPILER_TRACKING_LOGFILE, "NONE"), DDkwd__(COMPILER_TRACKING_LOGTABLE, "OFF"), DDkwd__(COMPILE_TIME_MONITOR, "OFF"), DD_____(COMPILE_TIME_MONITOR_LOG_ALLTIME_ONLY, "OFF"), DD_____(COMPILE_TIME_MONITOR_OUTPUT_FILE, "NONE"), // complexity threshold beyond which a // MultiJoin query is considered too complex DDflt0_(COMPLEX_MJ_QUERY_THRESHOLD, "1000000"), // Switch between new aligned internal format and exploded format DDkwd__(COMPRESSED_INTERNAL_FORMAT, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO_AFFINITY, "ON"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BULK_MOVE, "ON"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO, "0.30"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_EXPLAIN, "OFF"), DDui1__(COMPRESSED_INTERNAL_FORMAT_MIN_ROW_SIZE, "32"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_ROOT_DOES_CONVERSION, "OFF"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_ROW_SIZE_ADJ, "0.90"), XDDkwd__(COMPRESSION_TYPE, "NONE"), // These are switches and variables to use for compiler debugging DDkwd__(COMP_BOOL_1, "OFF"), DDkwd__(COMP_BOOL_10, "OFF"), DDkwd__(COMP_BOOL_100, "OFF"), DDkwd__(COMP_BOOL_101, "OFF"), DDkwd__(COMP_BOOL_102, "OFF"), DDkwd__(COMP_BOOL_103, "OFF"), DDkwd__(COMP_BOOL_104, "OFF"), DDkwd__(COMP_BOOL_105, "OFF"), DDkwd__(COMP_BOOL_106, "OFF"), DDkwd__(COMP_BOOL_107, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_108, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_109, "OFF"), DDkwd__(COMP_BOOL_11, "OFF"), DDkwd__(COMP_BOOL_110, "OFF"), DDkwd__(COMP_BOOL_111, "OFF"), DDkwd__(COMP_BOOL_112, "OFF"), DDkwd__(COMP_BOOL_113, "OFF"), DDkwd__(COMP_BOOL_114, "OFF"), DDkwd__(COMP_BOOL_115, "OFF"), DDkwd__(COMP_BOOL_116, "OFF"), DDkwd__(COMP_BOOL_117, "OFF"), DDkwd__(COMP_BOOL_118, "OFF"), // soln 10-100508-0135 - allow undo of fix. 
DDkwd__(COMP_BOOL_119, "OFF"), DDkwd__(COMP_BOOL_12, "OFF"), DDkwd__(COMP_BOOL_120, "OFF"), DDkwd__(COMP_BOOL_121, "OFF"), DDkwd__(COMP_BOOL_122, "ON"), // Solution 10-081203-7708 fix DDkwd__(COMP_BOOL_123, "OFF"), DDkwd__(COMP_BOOL_124, "OFF"), DDkwd__(COMP_BOOL_125, "ON"), DDkwd__(COMP_BOOL_126, "OFF"), DDkwd__(COMP_BOOL_127, "ON"), DDkwd__(COMP_BOOL_128, "ON"), DDkwd__(COMP_BOOL_129, "ON"), DDkwd__(COMP_BOOL_13, "OFF"), DDkwd__(COMP_BOOL_130, "ON"), DDkwd__(COMP_BOOL_131, "OFF"), DDkwd__(COMP_BOOL_132, "OFF"), DDkwd__(COMP_BOOL_133, "OFF"), DDkwd__(COMP_BOOL_134, "ON"), DDkwd__(COMP_BOOL_135, "ON"), DDkwd__(COMP_BOOL_136, "OFF"), DDkwd__(COMP_BOOL_137, "OFF"), // ON enables logging of RewriteJoinPred DDkwd__(COMP_BOOL_138, "OFF"), // ON disables tryToRewriteJoinPredicate DDkwd__(COMP_BOOL_139, "OFF"), DDkwd__(COMP_BOOL_14, "ON"), DDkwd__(COMP_BOOL_140, "ON"), DDkwd__(COMP_BOOL_141, "ON"), // Used for testing MC UEC adjustment for uplifting join cardinality DDkwd__(COMP_BOOL_142, "ON"), // Used for turning on Compile Time Statistics caching DDkwd__(COMP_BOOL_143, "OFF"), DDkwd__(COMP_BOOL_144, "OFF"), // only Key columns usage as a part of materialization of disjuncts is controlled by the CQD DDkwd__(COMP_BOOL_145, "ON"), // Used for selectivity adjustment for MC Joins DDkwd__(COMP_BOOL_146, "OFF"), DDkwd__(COMP_BOOL_147, "OFF"), DDkwd__(COMP_BOOL_148, "ON"), // Used for GroupBy Cardinality Enhancement for complex expressions DDkwd__(COMP_BOOL_149, "ON"), // Used for testing multi-col uniqueness cardinality enhancement DDkwd__(COMP_BOOL_15, "OFF"), DDkwd__(COMP_BOOL_150, "OFF"), DDkwd__(COMP_BOOL_151, "OFF"), DDkwd__(COMP_BOOL_152, "OFF"), DDkwd__(COMP_BOOL_153, "ON"), // skew buster: ON == use round robin, else Co-located. DDkwd__(COMP_BOOL_154, "OFF"), DDkwd__(COMP_BOOL_155, "OFF"), DDkwd__(COMP_BOOL_156, "ON"), // Used by RTS to turn on RTS Stats collection for ROOT operators DDkwd__(COMP_BOOL_157, "OFF"), DDkwd__(COMP_BOOL_158, "OFF"), DDkwd__(COMP_BOOL_159, "OFF"), DDkwd__(COMP_BOOL_16, "OFF"), DDkwd__(COMP_BOOL_160, "OFF"), DDkwd__(COMP_BOOL_161, "OFF"), DDkwd__(COMP_BOOL_162, "ON"), // transform NOT EXISTS subquery using anti_semijoin instead of Join-Agg DDkwd__(COMP_BOOL_163, "OFF"), DDkwd__(COMP_BOOL_164, "OFF"), DDkwd__(COMP_BOOL_165, "ON"), // set to 'ON' in M5 for SQ DDkwd__(COMP_BOOL_166, "OFF"), // ON --> turn off fix for 10-100310-8659. DDkwd__(COMP_BOOL_167, "OFF"), DDkwd__(COMP_BOOL_168, "ON"), DDkwd__(COMP_BOOL_169, "OFF"), DDkwd__(COMP_BOOL_17, "ON"), DDkwd__(COMP_BOOL_170, "ON"), DDkwd__(COMP_BOOL_171, "OFF"), DDkwd__(COMP_BOOL_172, "OFF"), DDkwd__(COMP_BOOL_173, "OFF"), // fix: make odbc params nullable DDkwd__(COMP_BOOL_174, "ON"), // internal usage: merge stmt DDkwd__(COMP_BOOL_175, "OFF"), // internal usage: merge stmt DDkwd__(COMP_BOOL_176, "OFF"), DDkwd__(COMP_BOOL_177, "OFF"), DDkwd__(COMP_BOOL_178, "OFF"), DDkwd__(COMP_BOOL_179, "OFF"), DDkwd__(COMP_BOOL_18, "OFF"), DDkwd__(COMP_BOOL_180, "OFF"), DDkwd__(COMP_BOOL_181, "OFF"), DDkwd__(COMP_BOOL_182, "OFF"), // internal usage DDkwd__(COMP_BOOL_183, "OFF"), DDkwd__(COMP_BOOL_184, "ON"), // ON => use min probe size for mdam. Using min probe size of 1 or 2 currently has a bug so this is not the default. 
OFF => use default probe size of 100 DDkwd__(COMP_BOOL_185, "ON"), //Fix, allows extract(year from current_date) to be treated as a userinput DDkwd__(COMP_BOOL_186, "OFF"), DDkwd__(COMP_BOOL_187, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_188, "OFF"), DDkwd__(COMP_BOOL_189, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_19, "OFF"), DDkwd__(COMP_BOOL_190, "OFF"), DDkwd__(COMP_BOOL_191, "OFF"), // Temp for UDF metadata switch DDkwd__(COMP_BOOL_192, "OFF"), DDkwd__(COMP_BOOL_193, "OFF"), DDkwd__(COMP_BOOL_194, "OFF"), DDkwd__(COMP_BOOL_195, "OFF"), // used to enable unexternalized get statistics options. DDkwd__(COMP_BOOL_196, "OFF"), DDkwd__(COMP_BOOL_197, "OFF"), DDkwd__(COMP_BOOL_198, "OFF"), DDkwd__(COMP_BOOL_199, "ON"), DDkwd__(COMP_BOOL_2, "OFF"), DDkwd__(COMP_BOOL_20, "OFF"), // ON -> disable ability of stmt to be canceled. DDkwd__(COMP_BOOL_200, "OFF"), DDkwd__(COMP_BOOL_201, "OFF"), DDkwd__(COMP_BOOL_202, "ON"),// For SQ: // ON: excluding fixup cost // for EXCHANGE for // anti-surf logic; // OFF: do include. // Change to ON in M5 DDkwd__(COMP_BOOL_203, "OFF"), DDkwd__(COMP_BOOL_205, "OFF"), // enable reorg on metadata DDkwd__(COMP_BOOL_206, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_207, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_208, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_209, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_21, "OFF"), DDkwd__(COMP_BOOL_210, "ON"), DDkwd__(COMP_BOOL_211, "ON"), // controls removing constants from group expression DDkwd__(COMP_BOOL_215, "OFF"), DDkwd__(COMP_BOOL_217, "OFF"), DDkwd__(COMP_BOOL_219, "OFF"), // for InMem obj defn DDkwd__(COMP_BOOL_22, "ON"), DDkwd__(COMP_BOOL_220, "OFF"), // UserLoad fastpath opt DDkwd__(COMP_BOOL_221, "OFF"), // unnests a subquery even when there is no explicit correlation DDkwd__(COMP_BOOL_222, "ON"), // R2.5 BR features enabled DDkwd__(COMP_BOOL_223, "OFF"), // enable undocumented options // bulk replicate features DDkwd__(COMP_BOOL_224, "OFF"), // enable undocumented // bulk replicate features DDkwd__(COMP_BOOL_225, "ON"), // enable optimized esps allocation DDkwd__(COMP_BOOL_226, "OFF"), // ON enables UNLOAD feature // for disk label stats. 
DDkwd__(COMP_BOOL_23, "ON"), DDkwd__(COMP_BOOL_24, "OFF"), // AS enhancement to adjust maxDoP DDkwd__(COMP_BOOL_25, "OFF"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_26, "OFF"), DDkwd__(COMP_BOOL_27, "OFF"), DDkwd__(COMP_BOOL_28, "OFF"), DDkwd__(COMP_BOOL_29, "OFF"), DDkwd__(COMP_BOOL_3, "OFF"), DDkwd__(COMP_BOOL_30, "ON"), DDkwd__(COMP_BOOL_31, "OFF"), DDkwd__(COMP_BOOL_32, "OFF"), DDkwd__(COMP_BOOL_33, "OFF"), DDkwd__(COMP_BOOL_34, "OFF"), DDkwd__(COMP_BOOL_35, "OFF"), DDkwd__(COMP_BOOL_36, "OFF"), DDkwd__(COMP_BOOL_37, "OFF"), DDkwd__(COMP_BOOL_38, "OFF"), DDkwd__(COMP_BOOL_39, "OFF"), DDkwd__(COMP_BOOL_4, "OFF"), DDkwd__(COMP_BOOL_40, "ON"), DDkwd__(COMP_BOOL_41, "OFF"), DDkwd__(COMP_BOOL_42, "ON"), DDkwd__(COMP_BOOL_43, "OFF"), DDkwd__(COMP_BOOL_44, "OFF"), DDkwd__(COMP_BOOL_45, "ON"), DDkwd__(COMP_BOOL_46, "OFF"), DDkwd__(COMP_BOOL_47, "ON"), DDkwd__(COMP_BOOL_48, "ON"), // Turned "Off" because of Regression failure DDkwd__(COMP_BOOL_49, "OFF"), DDkwd__(COMP_BOOL_5, "ON"), DDkwd__(COMP_BOOL_50, "OFF"), DDkwd__(COMP_BOOL_51, "OFF"), DDkwd__(COMP_BOOL_52, "OFF"), DDkwd__(COMP_BOOL_53, "ON"), //Turned "ON" for OCB Cost DDkwd__(COMP_BOOL_54, "OFF"), DDkwd__(COMP_BOOL_55, "OFF"), DDkwd__(COMP_BOOL_56, "OFF"), DDkwd__(COMP_BOOL_57, "ON"), DDkwd__(COMP_BOOL_58, "OFF"), DDkwd__(COMP_BOOL_59, "OFF"), DDkwd__(COMP_BOOL_6, "OFF"), // comp_bool_60 is used in costing of an exchange operator. This is // used in deciding to use Nodemap decoupling and other exchange // costing logic. DDkwd__(COMP_BOOL_60, "ON"), DDkwd__(COMP_BOOL_61, "OFF"), DDkwd__(COMP_BOOL_62, "OFF"), DDkwd__(COMP_BOOL_63, "OFF"), DDkwd__(COMP_BOOL_64, "OFF"), DDkwd__(COMP_BOOL_65, "OFF"), DDkwd__(COMP_BOOL_66, "OFF"), DDkwd__(COMP_BOOL_67, "ON"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_68, "ON"), DDkwd__(COMP_BOOL_69, "OFF"), DDkwd__(COMP_BOOL_7, "OFF"), DDkwd__(COMP_BOOL_70, "ON"), DDkwd__(COMP_BOOL_71, "OFF"), DDkwd__(COMP_BOOL_72, "OFF"), DDkwd__(COMP_BOOL_73, "OFF"), DDkwd__(COMP_BOOL_74, "ON"), DDkwd__(COMP_BOOL_75, "ON"), DDkwd__(COMP_BOOL_76, "ON"), DDkwd__(COMP_BOOL_77, "OFF"), DDkwd__(COMP_BOOL_78, "OFF"), DDkwd__(COMP_BOOL_79, "ON"), DDkwd__(COMP_BOOL_8, "OFF"), DDkwd__(COMP_BOOL_80, "OFF"), DDkwd__(COMP_BOOL_81, "OFF"), DDkwd__(COMP_BOOL_82, "OFF"), DDkwd__(COMP_BOOL_83, "ON"), DDkwd__(COMP_BOOL_84, "OFF"), DDkwd__(COMP_BOOL_85, "OFF"), DDkwd__(COMP_BOOL_86, "OFF"), DDkwd__(COMP_BOOL_87, "OFF"), DDkwd__(COMP_BOOL_88, "OFF"), DDkwd__(COMP_BOOL_89, "OFF"), DDkwd__(COMP_BOOL_9, "OFF"), DDkwd__(COMP_BOOL_90, "ON"), DDkwd__(COMP_BOOL_91, "OFF"), DDkwd__(COMP_BOOL_92, "OFF"), // used by generator. DDkwd__(COMP_BOOL_93, "ON"), // turn on pushdown for IUDs involving MVs. 
Default is off DDkwd__(COMP_BOOL_94, "OFF"), DDkwd__(COMP_BOOL_95, "OFF"), DDkwd__(COMP_BOOL_96, "OFF"), DDkwd__(COMP_BOOL_97, "OFF"), DDkwd__(COMP_BOOL_98, "ON"), DDkwd__(COMP_BOOL_99, "OFF"), DDflt0_(COMP_FLOAT_0, "0.002"), DDflt0_(COMP_FLOAT_1, "0.00002"), DDflt0_(COMP_FLOAT_2, "0"), DDflt0_(COMP_FLOAT_3, "0.01"), DDflt0_(COMP_FLOAT_4, "1.1"), DDflt__(COMP_FLOAT_5, "0.01"), // For Split Top cost adjustments : 0.25 DDflt__(COMP_FLOAT_6, "0.67"), // used to set the fudge factor which // is used to estimate cardinality of an // aggregate function in an equi-join expression DDflt__(COMP_FLOAT_7, "1.5"), DDflt__(COMP_FLOAT_8, "0.8"), // min expected #groups when HGB under right side of NLJ DDflt__(COMP_FLOAT_9, "1002.0"), DDint__(COMP_INT_0, "5000"), DDint__(COMP_INT_1, "0"), DDint__(COMP_INT_10, "3"), DDint__(COMP_INT_11, "-1"), DDint__(COMP_INT_12, "0"), DDint__(COMP_INT_13, "0"), DDint__(COMP_INT_14, "0"), DDint__(COMP_INT_15, "7"), DDint__(COMP_INT_16, "1000000"), DDint__(COMP_INT_17, "1000000"), DDint__(COMP_INT_18, "1"), DDint__(COMP_INT_19, "2"), DDint__(COMP_INT_2, "1"), DDint__(COMP_INT_20, "4"), DDint__(COMP_INT_21, "0"), DDint__(COMP_INT_22, "0"), // used to control old parser based INLIST transformation // 0 ==> OFF, positive value implies ON and has the effect of implicitly shutting down much of OR_PRED transformations // this cqd has been retained as a fallback in case OR_PRED has bugs. DDint__(COMP_INT_23, "22"), DDint__(COMP_INT_24, "1000000000"), DDint__(COMP_INT_25, "0"), DDint__(COMP_INT_26, "1"), DDint__(COMP_INT_27, "0"), DDint__(COMP_INT_28, "0"), DDint__(COMP_INT_29, "0"), DDint__(COMP_INT_3, "5"), DDint__(COMP_INT_30, "5"), DDint__(COMP_INT_31, "5"), DDint__(COMP_INT_32, "100"), DDint__(COMP_INT_33, "0"), DDint__(COMP_INT_34, "10000"), // lower bound: 10000 DDint__(COMP_INT_35, "500000"), // upper bound: 200000 DDint__(COMP_INT_36, "128"), // Bounds for producer for OCB DDint__(COMP_INT_37, "0"), DDint__(COMP_INT_38, "0"), // test master's abend DDint__(COMP_INT_39, "0"), // test esp's abend DDint__(COMP_INT_4, "400"), DDint__(COMP_INT_40, "10"), // this defines the percentage of selectivity after applying equality predicates on single column histograms // beyond which the optimizer should use MC stats DDint__(COMP_INT_41, "0"), DDint__(COMP_INT_42, "0"), DDint__(COMP_INT_43, "3"), // this is only for testing purposes. Once HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION is set to ON by default, the value of this CQD should be adjusted DDint__(COMP_INT_44, "1000000"), // frequency threshold above which // a boundary value will be inclded // in the frequentValueList (stats) DDint__(COMP_INT_45, "300"), DDint__(COMP_INT_46, "10"), DDint__(COMP_INT_47, "0"), DDint__(COMP_INT_48, "32"), // # trips thru scheduler task list before eval of CPU time limit. DDint__(COMP_INT_49, "0"), DDint__(COMP_INT_5, "0"), DDint__(COMP_INT_50, "0"), DDint__(COMP_INT_51, "0"), DDint__(COMP_INT_52, "0"), DDint__(COMP_INT_53, "0"), DDint__(COMP_INT_54, "0"), DDint__(COMP_INT_55, "0"), DDint__(COMP_INT_56, "0"), DDint__(COMP_INT_57, "0"), DDint__(COMP_INT_58, "0"), DDint__(COMP_INT_59, "0"), DDint__(COMP_INT_6, "400"), // comp_int_60 is used in costing of an exchnage operator. It is // used to indicate buffer size of a DP2 exchange when sending // messages down. 
DDint__(COMP_INT_60, "4"), DDint__(COMP_INT_61, "0"), // Exchange operator default value DDint__(COMP_INT_62, "10000"), DDint__(COMP_INT_63, "10000"), // SG Insert issue DDint__(COMP_INT_64, "0"), DDint__(COMP_INT_65, "0"), DDint__(COMP_INT_66, "0"), // to change #buffers per flushed cluster DDint__(COMP_INT_67, "8"), // to test #outer-buffers per a batch DDint__(COMP_INT_68, "0"), DDint__(COMP_INT_69, "0"), DDint__(COMP_INT_7, "10000000"), DDint__(COMP_INT_70, "1000000"), DDint__(COMP_INT_71, "0"), DDint__(COMP_INT_72, "0"), // if set to 1, allows keyPredicate to be inserted without passing key col. DDint__(COMP_INT_73, "1"), // if set to 1, disables cursor_delete plan if there are no alternate indexes. DDint__(COMP_INT_74, "0"), DDint__(COMP_INT_75, "0"), DDint__(COMP_INT_76, "0"), DDint__(COMP_INT_77, "0"), DDint__(COMP_INT_78, "0"), DDint__(COMP_INT_79, "0"), // this is used temporaraly as value for parallel threshold // in case ATTEMPT_ESP_PARALLELISM is set to MAXIMUM DDint__(COMP_INT_8, "20"), DDint__(COMP_INT_80, "3"), DDint__(COMP_INT_81, "0"), DDint__(COMP_INT_82, "0"), DDint__(COMP_INT_83, "0"), // max num of retries after parl purgedata open/control call errs.Default 25. DDint__(COMP_INT_84, "25"), // delay between each paral pd error retry. Default is 2 seconds. DDint__(COMP_INT_85, "2"), DDint__(COMP_INT_86, "0"), DDint__(COMP_INT_87, "0"), DDint__(COMP_INT_88, "0"), DDint__(COMP_INT_89, "2"), DDint__(COMP_INT_9, "0"), DDint__(COMP_INT_90, "0"), DDint__(COMP_INT_91, "0"), DDint__(COMP_INT_92, "0"), DDint__(COMP_INT_93, "0"), DDint__(COMP_INT_94, "0"), DDint__(COMP_INT_95, "0"), DDint__(COMP_INT_96, "0"), DDint__(COMP_INT_97, "0"), DDint__(COMP_INT_98, "512"), DDint__(COMP_INT_99, "10"), DD_____(COMP_STRING_1, "NONE"), DD_____(COMP_STRING_2, ""), DD_____(COMP_STRING_3, ""), DD_____(COMP_STRING_4, ""), DD_____(COMP_STRING_5, ""), DD_____(COMP_STRING_6, ""), // Configured_memory_for defaults are all measured in KB DDui___(CONFIGURED_MEMORY_FOR_BASE, "16384"), DDui___(CONFIGURED_MEMORY_FOR_DAM, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MINIMUM_HASH, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MXESP, "8192"), DDkwd__(CONSTANT_FOLDING, "OFF"), DDkwd__(COSTING_SHORTCUT_GROUPBY_FIX, "ON"), DDflt0_(COST_PROBE_DENSITY_THRESHOLD, ".25"), // As of 3/23/98 the tupp desc. length is 12 bytes. Change when executor // changes. DDflt0_(COST_TUPP_DESC_LENGTH_IN_KB, "0.01171875"), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_OVERHEAD, "10."), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_PER_BYTE, ".1"), // Same as CPUCOST_PREDICATE_COMPARISON // Change HH_OP_PROBE_HASH_TABLE when you change this value: DDflt0_(CPUCOST_COMPARE_SIMPLE_DATA_TYPE, ".200"), // no cost overhead assumed: DDflt0_(CPUCOST_COPY_ROW_OVERHEAD, "0."), // change CPUCOST_HASH_PER_KEY when changing this value DDflt0_(CPUCOST_COPY_ROW_PER_BYTE, ".0007"), DDflt0_(CPUCOST_COPY_SIMPLE_DATA_TYPE, ".005"), // This is a per data request overhead cost paid by the cpu DDflt0_(CPUCOST_DATARQST_OVHD, ".01"), DDflt0_(CPUCOST_DM_GET, ".001"), DDflt0_(CPUCOST_DM_UPDATE, ".001"), DDflt0_(CPUCOST_ENCODE_PER_BYTE, ".002"), DDflt0_(CPUCOST_ESP_INITIALIZATION, "10"), // The previous observation had calculated the number of seconds to // aggregate incorrectly. 
Now: // Number of seconds to scan 100,000 rows @ 208 bytes: 4 // Number of seconds to scan 100,000 rows @ 208 bytes and aggregate // 15 aggregates: 17 // Thus, number of seconds per aggregate = (17-4)/15 = 0.866667 // CPUCOST_PER_ROW = 1.13333/(0.00005*100,000) = 0.1733 // previous observation // It takes 13.96 seconds to aggregate 99,999 rows using // 15 expressions, thus at 0.00005 et_cpu, we have that // the cost to eval an arith op is: // 6.14 / (0.00005 * 99,9999 * 15) = 0.0819 DDflt0_(CPUCOST_EVAL_ARITH_OP, ".0305"), DDflt0_(CPUCOST_EVAL_FUNC_DEFAULT, "10."), DDflt0_(CPUCOST_EVAL_LOGICAL_OP, "1."), DDflt0_(CPUCOST_EVAL_SIMPLE_PREDICATE, "1."), DDflt0_(CPUCOST_EXCHANGE_COST_PER_BYTE, ".002"), DDflt0_(CPUCOST_EXCHANGE_COST_PER_ROW, ".002"), DDflt0_(CPUCOST_EXCHANGE_INTERNODE_COST_PER_BYTE, ".008"), DDflt0_(CPUCOST_EXCHANGE_MAPPING_FUNCTION, ".01"), // was 0.1, but now 0.011 // XDDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".011"), // Set the additional cost of copying a byte to message buffer for // remote node to be the same as for inter node, 0.01 // Also change it to be internalized DDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".01"), DDflt0_(CPUCOST_EXCHANGE_SPLIT_FUNCTION, ".01"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // History: // Before 01/06/98: 0.005 DDflt0_(CPUCOST_HASH_PER_BYTE, ".057325"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // From observation: // For a case when all the hash table fits into memory: // 01/05/98: 42,105 rows inserted per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to insert one row = 1/42105 = 0.00002375 // thd. of instructions per row inserted = 1/42105/0.00005 = 0.4750 // The cost is distributed as follows: // CPUCOST_HASH_PER_KEY + CPUCOST_HASH_PER_BYTE*4 + // HH_OP_INSERT_ROW_TO_CHAIN + CPUCOST_COPY_ROW_PER_BYTE * 4 // = 0.4750 // Thus we have: // 2* CPUCOST_HASH_PER_KEY + 0.01 + 0.0016*4 = 0.4750 // -> CPUCOST_HASH_PER_KEY = 0.4586/2 = 0.2293 // History: // Before 01/06/98: 0.02 // Change // CPUCOST_HASH_PER_BYTE // when changing this value DDflt0_(CPUCOST_HASH_PER_KEY, "1.29"), DDflt0_(CPUCOST_LIKE_COMPARE_OVERHEAD, "10."), DDflt0_(CPUCOST_LIKE_COMPARE_PER_BYTE, ".1"), DDflt0_(CPUCOST_LOCK_ROW, ".01"), DDflt0_(CPUCOST_NJ_TUPLST_FF, "10."), // Observation (A971125_1): // CPU time to scan 100,000 rows with no exe pred: 10 // CPU time to scan 100,000 rows with an exe pred like // nonkeycol < K: 11 // CPU time spend in every row: 1/100,000 = .00001 // Thus, at 0.00005 th. inst. per sec we have: 0.00001/0.00005 = // 0.2 thousand inst. to evaluate every row: // // Predicate comparison is very expensive right now (10/08/97) // (cost it that it takes like 1000 instruction for one comparison) // 10/08/97: 1. // Change // CPUCOST_COMPARE_SIMPLE_DATA_TYPE // when you change this value: // History // Before 04/30/98: .2 DDflt0_(CPUCOST_PREDICATE_COMPARISON, ".08"), // Cost of copying the data from disk to the DP2 Cache: DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_KB, "2.5"), DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_SEEK, "0.0"), // The communication between DP2 and ExeInDp2 requires to encode // and decode the key. DDflt0_(CPUCOST_SCAN_KEY_LENGTH, "0."), // The communication between DP2 and ExeInDp2 is complex and // ever changing. 
The following factor is introduced to // make the costing of scan fit observed CPU time for the scan: DDflt0_(CPUCOST_SCAN_OVH_PER_KB, "0.984215"), DDflt0_(CPUCOST_SCAN_OVH_PER_ROW, "0.0"), // It takes about 1/3 of a second to open a table, thus with a // 0.00005 ff for cpu elapsed time we get: // 1/3/0.00005 = 7000 thousands instructions // CPUCOST_SUBSET_OPEN lumps together all the overhead needed // to set-up the access to each partition. Thus it is a blocking // cost, nothing can overlap with it. DDflt0_(CPUCOST_SUBSET_OPEN, "7000"), DDflt0_(CPUCOST_SUBSET_OPEN_AFTER_FIRST, "1250"), DDflt0_(CPUCOST_TUPLE_REFERENCE, ".001"), DDui___(CREATE_DEFINITION_SCHEMA_VERSION, "0"), DDkwd__(CREATE_EXTERNAL_USER_NAME_INDEX, "OFF"), DDkwd__(CREATE_FOR_NO_RDF_REPLICATE, "OFF"), DDkwd__(CREATE_METADATA_TABLE, "OFF"), DDkwd__(CREATE_OBJECTS_IN_METADATA_ONLY, "OFF"), DDkwd__(CROSS_PRODUCT_CONTROL, "ON"), // CQDs for Common Subexpressions (CSEs) // cache queries containing temp tables for common subexpressions DDkwd__(CSE_CACHE_TEMP_QUERIES, "OFF"), // "cleanup obsolete volatile tables" command cleans up Hive temp tables DDkwd__(CSE_CLEANUP_HIVE_TABLES, "OFF"), // don't temp if all consumers have preds on n key columns DDui___(CSE_COMMON_KEY_PRED_CONTROL, "1"), // emit warnings that help diagnose why CSEs are not shared DDkwd__(CSE_DEBUG_WARNINGS, "OFF"), // create a CommonSubExpr node for CTEs defined in WITH clauses (OFF/ON) DDkwd__(CSE_FOR_WITH, "OFF"), // use Hive tables as temp tables DDkwd__(CSE_HIVE_TEMP_TABLE, "ON"), // don't temp if avg consumer has preds on more than n percent of key cols DDflt0_(CSE_PCT_KEY_COL_PRED_CONTROL, "49.9"), // print debugging info on stdout DDkwd__(CSE_PRINT_DEBUG_INFO, "OFF"), // limit temp table size (based on max. card and regular card) DDflt0_(CSE_TEMP_TABLE_MAX_MAX_SIZE, "1E12"), DDflt0_(CSE_TEMP_TABLE_MAX_SIZE, "1E9"), // implement CommonSubExpr as a temp table (OFF/SYSTEM/ON) DDkwd__(CSE_USE_TEMP, "SYSTEM"), SDDui___(CYCLIC_ESP_PLACEMENT, "1"), // if this one is "ON" it overwrites optimizer heuristics 4 & 5 as "ON" // if it's "OFF" then the defaults of the two heuristics will be used DDkwd__(DATA_FLOW_OPTIMIZATION, "ON"), // DDL Default location support DD_____(DDL_DEFAULT_LOCATIONS, ""), DDkwd__(DDL_EXPLAIN, "OFF"), DDkwd__(DDL_TRANSACTIONS, "ON"), // We ignore this setting for the first (SYSTEM_DEFAULTS) table open+read. DDkwd__(DEFAULTS_TABLE_ACCESS_WARNINGS, "OFF"), SDDkwd__(DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), XDDui1__(DEFAULT_DEGREE_OF_PARALLELISM, "2"), SDDkwd__(DEFAULT_SCHEMA_ACCESS_ONLY, "OFF"), SDDkwd__(DEFAULT_SCHEMA_NAMETYPE, "SYSTEM"), // These DEF_xxx values of "" get filled in by updateSystemParameters(). 
#define def_DEF_CHUNK_SIZE 5000000.0 #define str_DEF_CHUNK_SIZE "5000000.0" // DDui2__(DEF_CHUNK_SIZE, str_DEF_CHUNK_SIZE), DD_____(DEF_CPU_ARCHITECTURE, ""), DDui1__(DEF_DISCS_ON_CLUSTER, ""), DDui1__(DEF_INSTRUCTIONS_SECOND, ""), DDui___(DEF_LOCAL_CLUSTER_NUMBER, ""), DDui___(DEF_LOCAL_SMP_NODE_NUMBER, ""), //DEF_MAX_HISTORY_ROWS made external RV 06/21/01 CR 10-010425-2440 XDDui1__(DEF_MAX_HISTORY_ROWS, "1024"), DDui___(DEF_NUM_BM_CHUNKS, ""), DDui1__(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS, ""), DDui1__(DEF_NUM_SMP_CPUS, ""), DDui2__(DEF_PAGE_SIZE, ""), DDui1__(DEF_PHYSICAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_TOTAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_VIRTUAL_MEMORY_AVAILABLE, ""), DDkwd__(DESTROY_ORDER_AFTER_REPARTITIONING, "OFF"), // detailed executor statistics DDkwd__(DETAILED_STATISTICS, "OPERATOR"), DDkwd__(DIMENSIONAL_QUERY_OPTIMIZATION, "OFF"), DDkwd__(DISABLE_BUFFERED_INSERTS, "OFF"), DDkwd__(DISABLE_READ_ONLY, "OFF"), DD_____(DISPLAY_DATA_FLOW_GRAPH, "OFF"), XDDkwd__(DISPLAY_DIVISION_BY_COLUMNS, "OFF"), // opens are distributed among all partitions instead of just root. // 0: no distribution, only use root. // -1: max distribution, all partitions // <number>: num of partitions per segment DDint__(DISTRIBUTE_OPENS, "-1"), // temp. disable dop reduction logic DDflt0_(DOP_REDUCTION_ROWCOUNT_THRESHOLD, "0.0"), DDkwd__(DO_MINIMAL_RENAME, "OFF"), // if set, then space needed for executor structures at runtime is // optimized such that the allocation starts with a low number and then // is allocated on a need basis. This means that we may have to allocate // more smaller chunks if much space is needed. But it helps in the case // where many plans are being used and each one only takes a small amount // of space. This optimization especially helps in case of Dp2 fragments // as there is only a finite amount of space available there. Once that // limit is reached, and a new plan is shipped, it means that an existing // eid plan from dp2 memory need to be swapped out and then refixed up. // By reducing space utilization, we end up with more eid sessions in // use inside of dp2. DDkwd__(DO_RUNTIME_EID_SPACE_COMPUTATION, "OFF"), DDkwd__(DO_RUNTIME_SPACE_OPTIMIZATION, "OFF"), DDui2__(DP2_BLOCK_HEADER_SIZE, "96"), // DP2 Cache defaults as of 06/08/98. DDui1__(DP2_CACHE_1024_BLOCKS, "152"), DDui1__(DP2_CACHE_16K_BLOCKS, "1024"), DDui1__(DP2_CACHE_2048_BLOCKS, "150"), DDui1__(DP2_CACHE_32K_BLOCKS, "512"), DDui1__(DP2_CACHE_4096_BLOCKS, "4096"), DDui1__(DP2_CACHE_512_BLOCKS, "152"), DDui1__(DP2_CACHE_8K_BLOCKS, "2048"), // The cache size is about 2000 pages @ 4k each page DDui1__(DP2_CACHE_SIZE_IN_KB, "8000"), // Exchange Costing // 6/12/98. // End of buffer header is 32 bytes or .0313 KB. // Each Exchange->DP2 request is 48 bytes or .0469 KB. DDflte_(DP2_END_OF_BUFFER_HEADER_SIZE, ".0313"), DDflte_(DP2_EXCHANGE_REQUEST_SIZE, ".0469"), DDpct__(DP2_FRACTION_SEEK_FROM_RANDOM_TO_INORDER, "25"), DDui2__(DP2_MAX_READ_PER_ACCESS_IN_KB, "256"), // The buffer size, as of 10/07/97 is 32K DDui2__(DP2_MESSAGE_BUFFER_SIZE, "56"), // Exchange Costing // 6/12/98. 
// Message header for Exchange->DP2 is 18 bytes or .0176 KB DDflte_(DP2_MESSAGE_HEADER_SIZE, ".0176"), DDui2__(DP2_MESSAGE_HEADER_SIZE_BYTES, "18"), DDui1__(DP2_MINIMUM_FILE_SIZE_FOR_SEEK_IN_BLOCKS, "256"), DDint__(DP2_PRIORITY, "-1001"), DDint__(DP2_PRIORITY_DELTA, "-1001"), DDui1__(DP2_SEQ_READS_WITHOUT_SEEKS, "100"), DDkwd__(DYNAMIC_HISTOGRAM_COMPRESSION, "ON"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_DOWN, "1024"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_UP, "1024"), DDui2__(DYN_QUEUE_RESIZE_FACTOR, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_DOWN, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_UP, "4"), DDui1__(DYN_QUEUE_RESIZE_LIMIT, "9"), DDkwd__(EID_SPACE_USAGE_OPT, "OFF"), // For both of these CQDs see executor/ExDp2Trace.h for values. DDint__(EID_TRACE_STATES, "0"), DDtp___(EID_TRACE_STR, ""), DDkwd__(ELIMINATE_REDUNDANT_JOINS, "ON"), DDkwd__(ENABLE_DP2_XNS, "OFF"), DDSint__(ESP_ASSIGN_DEPTH, "0"), DDSint__(ESP_FIXUP_PRIORITY_DELTA, "0"), DDint__(ESP_IDLE_TIMEOUT, "1800"), // To match with set session defaults value DDkwd__(ESP_MULTI_FRAGMENTS, "ON"), DDui1500_4000(ESP_MULTI_FRAGMENT_QUOTA_VM, "4000"), DDui1_8(ESP_NUM_FRAGMENTS, "3"), DDSint__(ESP_PRIORITY, "0"), DDSint__(ESP_PRIORITY_DELTA, "0"), // Disable hints - if SYSTEM, enable on SSD, and disable only on HDD DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_HASH, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_SORT, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_OVERFLOW, "OFF"), DDui___(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB, "50"), DDkwd__(EXE_BMO_SET_BUFFERED_WRITES, "OFF"), SDDkwd__(EXE_DIAGNOSTIC_EVENTS, "OFF"), DDui1__(EXE_HGB_INITIAL_HT_SIZE, "262144"), // == hash buffer DDflt__(EXE_HJ_MIN_NUM_CLUSTERS, "4"), DDkwd__(EXE_LOG_RETRY_IPC, "OFF"), // Total size of memory (in MB) available to BMOs (e.g., 1200 MB) SDDui___(EXE_MEMORY_AVAILABLE_IN_MB, "1200"), SDDui___(EXE_MEMORY_FOR_PARTIALHGB_IN_MB, "100"), SDDui___(EXE_MEMORY_FOR_PROBE_CACHE_IN_MB, "100"), // lower-bound memory limit for BMOs/nbmos (in MB) DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE , "10"), // Override the memory quota system; set limit per each and every BMO SDDflt__(EXE_MEM_LIMIT_PER_BMO_IN_MB, "0"), DDui1__(EXE_NUM_CONCURRENT_SCRATCH_IOS, "4"), // DDkwd__(EXE_PARALLEL_DDL, "ON"), DDui___(EXE_PA_DP2_STATIC_AFFINITY, "1"), DDkwd__(EXE_SINGLE_BMO_QUOTA, "ON"), // The following 3 are only for testing overflow; zero value means: ignore DDui___(EXE_TEST_FORCE_CLUSTER_SPLIT_AFTER_MB, "0"), DDui___(EXE_TEST_FORCE_HASH_LOOP_AFTER_NUM_BUFFERS, "0"), DDui___(EXE_TEST_HASH_FORCE_OVERFLOW_EVERY, "0"), DDkwd__(EXE_UTIL_RWRS, "OFF"), DDkwd__(EXPAND_DP2_SHORT_ROWS, "ON"), XDDint__(EXPLAIN_DESCRIPTION_COLUMN_SIZE, "-1"), DDkwd__(EXPLAIN_DETAIL_COST_FOR_CALIBRATION, "FALSE"), DDkwd__(EXPLAIN_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(EXPLAIN_IN_RMS, "ON"), DDkwd__(EXPLAIN_OPTION_C, "OFF"), DDui___(EXPLAIN_OUTPUT_ROW_SIZE, "80"), DDui1__(EXPLAIN_ROOT_INPUT_VARS_MAX, "2000"), // maximum number of inputs that we can tolerate to // explain information for inputVars expression // this is needed to avoid stack overflow DDkwd__(EXPLAIN_SPACE_OPT, "ON"), DDkwd__(EXPLAIN_STRATEGIZER_PARAMETERS, "OFF"), DDflte_(EX_OP_ALLOCATE_ATP, ".02"), // Calibration // 01/23/98: 50. 
// Original: .1 DDflte_(EX_OP_ALLOCATE_BUFFER, "50."), DDflte_(EX_OP_ALLOCATE_BUFFER_POOL, ".1"), DDflte_(EX_OP_ALLOCATE_TUPLE, ".05"), // copy_atp affects the costing of NJ // History: // 08/21/98: 0.02, The previous change affected more than one operrator // 08/13/98: 1.0 // 01/08/98: 0.02 DDflte_(EX_OP_COPY_ATP, "1.1335"), DDflte_(EX_OP_DEQUEUE, ".02"), DDflte_(EX_OP_ENQUEUE, ".02"), DDkwd__(FAKE_VOLUME_ASSIGNMENTS, "OFF"), DDui1__(FAKE_VOLUME_NUM_VOLUMES, "24"), DDkwd__(FAST_DELETE, "OFF"), DDkwd__(FAST_DP2_SUBSET_OPT, "ON"), // upper and lower limit (2,10) must be in sync with error values in //ExFastTransport.cpp DDkwd__(FAST_EXTRACT_DIAGS, "OFF"), DDui2_10(FAST_EXTRACT_IO_BUFFERS, "6"), DDui___(FAST_EXTRACT_IO_TIMEOUT_SEC, "60"), DDkwd__(FAST_REPLYDATA_MOVE, "ON"), SDDkwd__(FFDC_DIALOUTS_FOR_MXCMP, "OFF"), DDkwd__(FIND_COMMON_SUBEXPRS_IN_OR, "ON"), DDui___(FLOAT_ESP_RANDOM_NUM_SEED, "0"), DDkwd__(FORCE_BUSHY_CQS, "ON"), DDkwd__(FORCE_PARALLEL_CREATE_INDEX, "OFF"), DDkwd__(FORCE_PARALLEL_INSERT_SELECT, "OFF"), DDkwd__(FORCE_PASS_ONE, "OFF"), DDkwd__(FORCE_PASS_TWO, "ON"), // Control if plan fragments need to be compressed // DDui___(FRAG_COMPRESSION_THRESHOLD, "16"), // Controls FSO Tests for debug // DDui___(FSO_RUN_TESTS, "0"), // Controls use of Simple File Scan Optimizer // IF 0 - Use original "Complex" File Scan Optimizer. // (in case simple causes problems) // IF 1 - Use logic to determine FSO to use. (default) // IF 2 - Use logic to determine FSO to use, but also use new // executor predicate costing. // IF >2 - Always use new "Simple" File Scan Optimizer. // (not recommended) // DDui___(FSO_TO_USE, "1"), // Disallow/Allow full outer joins in MultiJoin framework DDkwd__(FULL_OUTER_JOINS_SPOIL_JBB, "OFF"), DDkwd__(GA_PROP_INDEXES_ARITY_1, "ON"), // this default value is filled in // NADefaults::initCurrentDefaultsWithDefaultDefaults. The default value // is ON for static compiles and OFF for dynamic queries. 
DDkwd__(GENERATE_EXPLAIN, "ON"), DDipcBu(GEN_ALIGNED_PA_DP2_BUFFER_SIZE, "31000"), DDui1__(GEN_CBUF_BUFFER_SIZE, "30000"), DDui1__(GEN_CBUF_NUM_BUFFERS, "4"), DDui1__(GEN_CBUF_SIZE_DOWN, "8"), DDui1__(GEN_CBUF_SIZE_UP, "8"), DDui___(GEN_CS_BUFFER_SIZE, "0"), DDui___(GEN_CS_NUM_BUFFERS, "0"), DDui___(GEN_CS_SIZE_DOWN, "4"), DDui___(GEN_CS_SIZE_UP, "4"), DDkwd__(GEN_DBLIMITS_LARGER_BUFSIZE, "ON"), DDui1__(GEN_DDL_BUFFER_SIZE, "30000"), DDui1__(GEN_DDL_NUM_BUFFERS, "4"), DDui1__(GEN_DDL_SIZE_DOWN, "2"), DDui1__(GEN_DDL_SIZE_UP, "32"), DDui1__(GEN_DEL_BUFFER_SIZE, "512"), DDui1__(GEN_DEL_NUM_BUFFERS, "5"), DDui1__(GEN_DEL_SIZE_DOWN, "2"), DDui1__(GEN_DEL_SIZE_UP, "2"), DDui1__(GEN_DESC_BUFFER_SIZE, "10240"), DDui1__(GEN_DESC_NUM_BUFFERS, "4"), DDui1__(GEN_DESC_SIZE_DOWN, "2"), DDui1__(GEN_DESC_SIZE_UP, "16"), DDui1__(GEN_DP2I_BUFFER_SIZE, "10000"), DDui1__(GEN_DP2I_NUM_BUFFERS, "2"), DDui1__(GEN_DP2I_SIZE_DOWN, "32"), DDui1__(GEN_DP2I_SIZE_UP, "64"), DDui1__(GEN_DPDU_BUFFER_SIZE, "2"), DDui1__(GEN_DPDU_NUM_BUFFERS, "1"), DDui1__(GEN_DPDU_SIZE_DOWN, "2"), DDui1__(GEN_DPDU_SIZE_UP, "2"), DDui1__(GEN_DPRO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPRO_NUM_BUFFERS, "1"), DDui1__(GEN_DPRO_SIZE_DOWN, "16"), DDui1__(GEN_DPRO_SIZE_UP, "16"), DDui1__(GEN_DPSO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPSO_NUM_BUFFERS, "4"), DDui1__(GEN_DPSO_SIZE_DOWN, "2048"), DDui1__(GEN_DPSO_SIZE_UP, "2048"), DDui1__(GEN_DPUO_BUFFER_SIZE, "10000"), DDui1__(GEN_DPUO_NUM_BUFFERS, "4"), DDui1__(GEN_DPUO_SIZE_DOWN, "2048"), DDui1__(GEN_DPUO_SIZE_UP, "2048"), DDui1__(GEN_DPVI_BUFFER_SIZE, "10000"), DDui1__(GEN_DPVI_NUM_BUFFERS, "2"), DDui1__(GEN_DPVI_SIZE_DOWN, "32"), DDui1__(GEN_DPVI_SIZE_UP, "64"), DDui___(GEN_EIDR_BROKEN_TREE_CHECK_INTERVAL, "128"), DDipcBu(GEN_EIDR_BUFFER_SIZE, "31000"), DDui1__(GEN_EIDR_NUM_BUFFERS, "3"), DDui1__(GEN_EIDR_SIZE_DOWN, "2"), DDui1__(GEN_EIDR_SIZE_UP, "2"), DDui___(GEN_EIDR_STATS_REPLY_INTERVAL, "3000"), DDint__(GEN_EXCHANGE_MAX_MEM_IN_KB, "4000"), DDint__(GEN_EXCHANGE_MSG_COUNT, "80"), // Fast extract settings are for UDR method invocations DDui1__(GEN_FE_BUFFER_SIZE, "31000"), DDui1__(GEN_FE_NUM_BUFFERS, "2"), DDui1__(GEN_FE_SIZE_DOWN, "4"), DDui1__(GEN_FE_SIZE_UP, "4"), DDui1__(GEN_FSRT_BUFFER_SIZE, "5120"), DDui1__(GEN_FSRT_NUM_BUFFERS, "5"), DDui1__(GEN_FSRT_SIZE_DOWN, "2"), DDui1__(GEN_FSRT_SIZE_UP, "8"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HGBY_BUFFER_SIZE, "262144"), DDui1__(GEN_HGBY_NUM_BUFFERS , "5"), DDui1__(GEN_HGBY_PARTIAL_GROUP_FLUSH_THRESHOLD, "100"), DDui___(GEN_HGBY_PARTIAL_GROUP_ROWS_PER_CLUSTER, "0"), DDui1__(GEN_HGBY_SIZE_DOWN, "2048"), DDui1__(GEN_HGBY_SIZE_UP, "2048"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HSHJ_BUFFER_SIZE, "262144"), // Controls use of the hash join min/max optimization. 
DDkwd__(GEN_HSHJ_MIN_MAX_OPT, "OFF"), DDui1__(GEN_HSHJ_NUM_BUFFERS, "1"), DDui1__(GEN_HSHJ_SIZE_DOWN, "2048"), DDui1__(GEN_HSHJ_SIZE_UP, "2048"), DDui1__(GEN_IMDT_BUFFER_SIZE, "2"), DDui1__(GEN_IMDT_NUM_BUFFERS, "1"), DDui1__(GEN_IMDT_SIZE_DOWN, "2"), DDui1__(GEN_IMDT_SIZE_UP, "2"), DDui1__(GEN_INS_BUFFER_SIZE, "10240"), DDui1__(GEN_INS_NUM_BUFFERS, "3"), DDui1__(GEN_INS_SIZE_DOWN, "4"), DDui1__(GEN_INS_SIZE_UP, "128"), // Controls LeanEr Expression generation DDkwd__(GEN_LEANER_EXPRESSIONS, "ON"), DDui1__(GEN_LOCK_BUFFER_SIZE, "1024"), DDui1__(GEN_LOCK_NUM_BUFFERS, "1"), DDui1__(GEN_LOCK_SIZE_DOWN, "4"), DDui1__(GEN_LOCK_SIZE_UP, "4"), DDui1__(GEN_MATR_BUFFER_SIZE, "2"), DDui1__(GEN_MATR_NUM_BUFFERS, "1"), DDui1__(GEN_MATR_SIZE_DOWN, "2"), DDui1__(GEN_MATR_SIZE_UP, "8"), DDui1__(GEN_MEM_PRESSURE_THRESHOLD, "10000"), DDui1__(GEN_MJ_BUFFER_SIZE, "32768"), DDui1__(GEN_MJ_NUM_BUFFERS, "1"), DDui1__(GEN_MJ_SIZE_DOWN, "2"), DDui1__(GEN_MJ_SIZE_UP, "1024"), DDui1__(GEN_ONLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_DOWN, "4"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_UP, "2048"), DDui1__(GEN_ONLJ_NUM_BUFFERS, "5"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_DOWN, "2048"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_UP, "2048"), DDkwd__(GEN_ONLJ_SET_QUEUE_LEFT, "ON"), DDkwd__(GEN_ONLJ_SET_QUEUE_RIGHT, "ON"), DDui1__(GEN_ONLJ_SIZE_DOWN, "2048"), DDui1__(GEN_ONLJ_SIZE_UP, "2048"), DDui1__(GEN_PAR_LAB_OP_BUFFER_SIZE, "1024"), DDui1__(GEN_PAR_LAB_OP_NUM_BUFFERS, "1"), DDui1__(GEN_PAR_LAB_OP_SIZE_DOWN, "2"), DDui1__(GEN_PAR_LAB_OP_SIZE_UP, "4"), DDipcBu(GEN_PA_BUFFER_SIZE, "31000"), DDui1__(GEN_PA_NUM_BUFFERS, "5"), DDui1__(GEN_PA_SIZE_DOWN, "2048"), DDui1__(GEN_PA_SIZE_UP, "2048"), DDui1__(GEN_PROBE_CACHE_NUM_ENTRIES, "16384"),// number of entries DDui___(GEN_PROBE_CACHE_NUM_INNER, "0"), //0 means compiler decides DDui1__(GEN_PROBE_CACHE_SIZE_DOWN, "2048"), DDui1__(GEN_PROBE_CACHE_SIZE_UP, "2048"), DDui1__(GEN_RCRS_BUFFER_SIZE, "2"), DDui1__(GEN_RCRS_NUM_BUFFERS, "1"), DDui1__(GEN_RCRS_SIZE_DOWN, "8"), DDui1__(GEN_RCRS_SIZE_UP, "16"), DDkwd__(GEN_RESET_ACCESS_COUNTER, "OFF"), DDui1__(GEN_ROOT_BUFFER_SIZE, "2"), DDui1__(GEN_ROOT_NUM_BUFFERS, "1"), DDui1__(GEN_ROOT_SIZE_DOWN, "2"), DDui1__(GEN_ROOT_SIZE_UP, "2"), DDui1__(GEN_SAMPLE_BUFFER_SIZE, "5120"), DDui1__(GEN_SAMPLE_NUM_BUFFERS, "5"), DDui1__(GEN_SAMPLE_SIZE_DOWN, "16"), DDui1__(GEN_SAMPLE_SIZE_UP, "16"), DDui1__(GEN_SCAN_BUFFER_SIZE, "10240"), DDui1__(GEN_SCAN_NUM_BUFFERS, "10"), DDui1__(GEN_SCAN_SIZE_DOWN, "16"), DDui1__(GEN_SCAN_SIZE_UP, "32"), DDui1__(GEN_SEQFUNC_BUFFER_SIZE, "5120"), DDui1__(GEN_SEQFUNC_NUM_BUFFERS, "5"), DDui1__(GEN_SEQFUNC_SIZE_DOWN, "16"), DDui1__(GEN_SEQFUNC_SIZE_UP, "16"), DDkwd__(GEN_SEQFUNC_UNLIMITED_HISTORY, "OFF"), DDui1__(GEN_SEQ_BUFFER_SIZE, "512"), DDui1__(GEN_SEQ_NUM_BUFFERS, "5"), DDui1__(GEN_SEQ_SIZE_DOWN, "2"), DDui1__(GEN_SEQ_SIZE_UP, "2"), DDui1__(GEN_SGBY_BUFFER_SIZE, "5120"), DDui1__(GEN_SGBY_NUM_BUFFERS, "5"), DDui1__(GEN_SGBY_SIZE_DOWN, "2048"), DDui1__(GEN_SGBY_SIZE_UP, "2048"), DDui1__(GEN_SID_BUFFER_SIZE, "1024"), DDui1__(GEN_SID_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_BUFFER_SIZE, "2"), DDui1__(GEN_SNDB_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_SIZE_DOWN, "4"), DDui1__(GEN_SNDB_SIZE_UP, "128"), DDui___(GEN_SNDT_BUFFER_SIZE_DOWN, "0"), DDui___(GEN_SNDT_BUFFER_SIZE_UP, "0"), DDui1__(GEN_SNDT_NUM_BUFFERS, "2"), DDkwd__(GEN_SNDT_RESTRICT_SEND_BUFFERS, "ON"), DDui1__(GEN_SNDT_SIZE_DOWN, "4"), DDui1__(GEN_SNDT_SIZE_UP, "128"), DDui1__(GEN_SORT_MAX_BUFFER_SIZE, "5242880"), DDui1__(GEN_SORT_MAX_NUM_BUFFERS, "160"), 
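  // Illustrative note (the example value below is hypothetical, not taken from
  // this file): each DD* entry supplies the compile-time default for a CQD
  // attribute, and a session can override it at run time with, e.g.,
  //   CONTROL QUERY DEFAULT GEN_SORT_MAX_NUM_BUFFERS '320';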
DDui___(GEN_SORT_MIN_BUFFER_SIZE, "0"), DDui1__(GEN_SORT_NUM_BUFFERS, "2"), DDui1__(GEN_SORT_SIZE_DOWN, "2"), DDui1__(GEN_SORT_SIZE_UP, "1024"), DDkwd__(GEN_SORT_TOPN, "ON"), DDui1__(GEN_SORT_TOPN_THRESHOLD, "10000"), DDui1__(GEN_SPLB_BUFFER_SIZE, "2"), DDui1__(GEN_SPLB_NUM_BUFFERS, "1"), DDui1__(GEN_SPLB_SIZE_DOWN, "2"), DDui1__(GEN_SPLB_SIZE_UP, "2"), DDui1__(GEN_SPLT_BUFFER_SIZE, "2"), DDui1__(GEN_SPLT_NUM_BUFFERS, "1"), DDui1__(GEN_SPLT_SIZE_DOWN, "2048"), DDui1__(GEN_SPLT_SIZE_UP, "2048"), DDui1__(GEN_STPR_BUFFER_SIZE, "1024"), DDui1__(GEN_STPR_NUM_BUFFERS, "3"), DDui1__(GEN_STPR_SIZE_DOWN, "2"), DDui1__(GEN_STPR_SIZE_UP, "2"), DDui1__(GEN_TFLO_BUFFER_SIZE, "5120"), DDui1__(GEN_TFLO_NUM_BUFFERS, "2"), DDui1__(GEN_TFLO_SIZE_DOWN, "8"), DDui1__(GEN_TFLO_SIZE_UP, "16"), DDui512(GEN_TIMEOUT_BUFFER_SIZE, "4096"), DDui1__(GEN_TIMEOUT_NUM_BUFFERS, "1"), DDui2__(GEN_TIMEOUT_SIZE_DOWN, "2"), DDui2__(GEN_TIMEOUT_SIZE_UP, "4"), DDui1__(GEN_TRAN_BUFFER_SIZE, "4096"), DDui1__(GEN_TRAN_NUM_BUFFERS, "1"), DDui1__(GEN_TRAN_SIZE_DOWN, "2"), DDui1__(GEN_TRAN_SIZE_UP, "4"), DDui1__(GEN_TRSP_BUFFER_SIZE, "10240"), DDui1__(GEN_TRSP_NUM_BUFFERS, "5"), DDui1__(GEN_TRSP_SIZE_DOWN, "16"), DDui1__(GEN_TRSP_SIZE_UP, "16"), DDui1__(GEN_TUPL_BUFFER_SIZE, "1024"), DDui1__(GEN_TUPL_NUM_BUFFERS, "4"), DDui1__(GEN_TUPL_SIZE_DOWN, "2048"), DDui1__(GEN_TUPL_SIZE_UP, "2048"), // GEN_UDRRS_ settings are for stored procedure result // set proxy plans DDui1__(GEN_UDRRS_BUFFER_SIZE, "31000"), DDui1__(GEN_UDRRS_NUM_BUFFERS, "2"), DDui1__(GEN_UDRRS_SIZE_DOWN, "4"), DDui1__(GEN_UDRRS_SIZE_UP, "128"), // GEN_UDR_ settings are for UDR method invocations DDui1__(GEN_UDR_BUFFER_SIZE, "31000"), DDui1__(GEN_UDR_NUM_BUFFERS, "2"), DDui1__(GEN_UDR_SIZE_DOWN, "4"), DDui1__(GEN_UDR_SIZE_UP, "4"), DDui1__(GEN_UNLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_UNLJ_NUM_BUFFERS, "5"), DDui1__(GEN_UNLJ_SIZE_DOWN, "8"), DDui1__(GEN_UNLJ_SIZE_UP, "16"), DDui1__(GEN_UN_BUFFER_SIZE, "10240"), DDui1__(GEN_UN_NUM_BUFFERS, "5"), DDui1__(GEN_UN_SIZE_DOWN, "8"), DDui1__(GEN_UN_SIZE_UP, "16"), DDui1__(GEN_UPD_BUFFER_SIZE, "5120"), DDui1__(GEN_UPD_NUM_BUFFERS, "5"), DDui1__(GEN_UPD_SIZE_DOWN, "2"), DDui1__(GEN_UPD_SIZE_UP, "2"), // Used when Compressed_Internal_Format is on to reduce space in the // hash buffers (Hash Join and Hash Groupby) and sort buffers. DDkwd__(GEN_VARIABLE_LENGTH_BUFFERS, "OFF"), DDui1__(GEN_XPLN_BUFFER_SIZE, "4096"), DDui1__(GEN_XPLN_NUM_BUFFERS, "3"), DDui1__(GEN_XPLN_SIZE_DOWN, "8"), DDui1__(GEN_XPLN_SIZE_UP, "16"), // When less or equal to this CQD (5000 rows by default), a partial root // will be running in the Master. Set to 0 to disable the feature. DDint__(GROUP_BY_PARTIAL_ROOT_THRESHOLD, "5000"), DDkwd__(GROUP_BY_PUSH_TO_BOTH_SIDES_OF_JOIN, "ON"), DDkwd__(GROUP_OR_ORDER_BY_EXPR, "ON"), // HASH_JOINS ON means do HASH_JOINS XDDkwd__(HASH_JOINS, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN1, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN2, "ON"), // HBase defaults // Some of the more important ones: // HBASE_CATALOG: Catalog of "_ROW_" and "_CELL_" schemas // HBASE_COPROCESSORS: Enable use of co-processors for aggregates. 
// need to set the coprocessor in HBase config file // HBASE_ESTIMATE_ROW_COUNT_VIA_COPROCESSOR: If ON, use a coprocessor when // estimating row counts; if OFF, use client side // code (the latter doesn't work if HBase encryption // is being used) // HBASE_INTERFACE: JNI or JNI_TRX (transactional interface) // HBASE_MAX_COLUMN_xxx_LENGTH: Max length of some // string columns in the "_ROW_" and "_CELL_" schemas // HBASE_SQL_IUD_SEMANTICS: Off: Don't check for existing rows for insert/update DDkwd__(HBASE_ASYNC_DROP_TABLE, "OFF"), DDkwd__(HBASE_ASYNC_OPERATIONS, "ON"), // HBASE_CACHE_BLOCKS, ON => cache every scan, OFF => cache no scan // SYSTEM => cache scans which take less than 1 RS block cache mem. DDui___(HBASE_BLOCK_SIZE, "65536"), DDkwd__(HBASE_CACHE_BLOCKS, "SYSTEM"), DD_____(HBASE_CATALOG, "HBASE"), DDkwd__(HBASE_CHECK_AND_UPDEL_OPT, "ON"), DDkwd__(HBASE_COMPRESSION_OPTION, ""), DDkwd__(HBASE_COPROCESSORS, "ON"), DDkwd__(HBASE_CREATE_OLD_MD_FOR_UPGRADE_TESTING, "OFF"), DDkwd__(HBASE_DATA_BLOCK_ENCODING_OPTION, ""), // If set to 'OFF' we get a stub cost of 1 for delete operations. // We can remove this once the delete costing code has broader // exposure. DDkwd__(HBASE_DELETE_COSTING, "ON"), DDflt0_(HBASE_DOP_PARALLEL_SCANNER, "0."), DDkwd__(HBASE_ESTIMATE_ROW_COUNT_VIA_COPROCESSOR, "OFF"), DDkwd__(HBASE_FILTER_PREDS, "OFF"), DDkwd__(HBASE_HASH2_PARTITIONING, "ON"), DDui___(HBASE_INDEX_LEVEL, "0"), DDui___(HBASE_MAX_COLUMN_INFO_LENGTH, "10000"), DDui___(HBASE_MAX_COLUMN_NAME_LENGTH, "100"), DDui___(HBASE_MAX_COLUMN_VAL_LENGTH, "1000"), DDui___(HBASE_MAX_ESPS, "9999"), DDui___(HBASE_MAX_NUM_SEARCH_KEYS, "512"), DDui1__(HBASE_MIN_BYTES_PER_ESP_PARTITION, "67108864"), DDkwd__(HBASE_NATIVE_IUD, "ON"), DDui1__(HBASE_NUM_CACHE_ROWS_MAX, "1024"), DDui1__(HBASE_NUM_CACHE_ROWS_MIN, "100"), DDkwd__(HBASE_RANGE_PARTITIONING, "ON"), DDkwd__(HBASE_RANGE_PARTITIONING_MC_SPLIT, "ON"), DDkwd__(HBASE_RANGE_PARTITIONING_PARTIAL_COLS,"ON"), DDui___(HBASE_REGION_SERVER_MAX_HEAP_SIZE, "1024"), // in units of MB DDkwd__(HBASE_ROWSET_VSBB_OPT, "ON"), DDusht_(HBASE_ROWSET_VSBB_SIZE, "1024"), DDflt0_(HBASE_SALTED_TABLE_MAX_FILE_SIZE, "0"), DDkwd__(HBASE_SALTED_TABLE_SET_SPLIT_POLICY, "ON"), DD_____(HBASE_SCHEMA, "HBASE"), DDkwd__(HBASE_SERIALIZATION, "ON"), DD_____(HBASE_SERVER, ""), DDkwd__(HBASE_SMALL_SCANNER, "OFF"), DDkwd__(HBASE_SQL_IUD_SEMANTICS, "ON"), DDkwd__(HBASE_STATS_PARTITIONING, "ON"), DDkwd__(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT, "OFF"), // If set to 'OFF' we get a stub cost of 1 for update operations. // We can remove this once the delete costing code has broader // exposure. This is 'OFF' at the moment because the update code // is only partially written. DDkwd__(HBASE_UPDATE_COSTING, "OFF"), DDkwd__(HBASE_UPDEL_CURSOR_OPT, "ON"), DDui___(HBASE_USE_FAKED_REGIONS, "0"), DD_____(HBASE_ZOOKEEPER_PORT, ""), DDui1__(HDFS_IO_BUFFERSIZE, "65536"), DDui___(HDFS_IO_BUFFERSIZE_BYTES, "0"), // The value 0 denotes RangeTail = max record length of table. 
DDui___(HDFS_IO_RANGE_TAIL, "0"), DDkwd__(HDFS_PREFETCH, "ON"), DDkwd__(HDFS_READ_CONTINUE_ON_ERROR, "OFF"), DDui1__(HDFS_REPLICATION, "1"), DDkwd__(HDFS_USE_CURSOR_MULTI, "OFF"), DDkwd__(HGB_BITMUX, "OFF"), DDflt0_(HGB_CPUCOST_INITIALIZE, "1."), DDflt0_(HGB_DP2_MEMORY_LIMIT, "10000."), DDflte_(HGB_GROUPING_FACTOR_FOR_SPILLED_CLUSTERS, ".5"), DDflte_(HGB_MAX_TABLE_SIZE_FOR_CLUSTERS, "4E5"), DDflte_(HGB_MEMORY_AVAILABLE_FOR_CLUSTERS, "10"), DDflte_(HH_OP_ALLOCATE_BUCKET_ARRAY, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTER, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTERDB, ".1"), DDflte_(HH_OP_ALLOCATE_HASH_TABLE, ".05"), DDflt1_(HH_OP_HASHED_ROW_OVERHEAD, "8."), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. // change // CPUCOST_HASH_PER_KEY // when changing this value // It takes around 2 seconds to insert 100,000 rows into the chain: // @ 0.00005 secs per k instr: // k instr= 2/0.00005/100000 = 0.4 // History: // Before 03/11/98: 0.4 // Initially: 0.01 DDflte_(HH_OP_INSERT_ROW_TO_CHAIN, "0.51"), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. // 01/05/98: 15,433 rows probed per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to probe one row = 1/15,433 = 0.000064796 // This time includes: time to position and to compare. Thus // subtract the time to compare to arrive to the proper number: // thd. of instructions per row inserted = // 1/15,433/0.00005 - CPUCOST_COMPARE_SIMPLE_DATA_TYPE = // 1.2959 - 0.2 = 1.0959 // History: // Before 03/11/98: 1.0959 // Before 01/05/98: 0.01 DDflt0_(HH_OP_PROBE_HASH_TABLE, "0.011"), DDflt0_(HH_OP_READ_HASH_BUFFER, "0."), DDflt0_(HH_OP_WRITE_HASH_BUFFER, "0."), // Added 10/16/02 DDkwd__(HIDE_INDEXES, "NONE"), DDansi_(HISTOGRAMS_SCHEMA, ""), // ------------------------------------------------------------------------- // Histogram fudge factors // ------------------------------------------------------------------------- //HIST_BASE_REDUCTION and HIST_PREFETCH externalized 08/21/01 CR 10-010713-3895 DDkwd__(HIST_ASSUME_INDEPENDENT_REDUCTION, "ON"), XDDkwd__(HIST_AUTO_GENERATION_OF_SAMPLE, "OFF"), DDkwd__(HIST_BASE_REDUCTION, "ON"), DDflt0_(HIST_BASE_REDUCTION_FUDGE_FACTOR, "0.1"), DDflt0_(HIST_CONSTANT_ALPHA, "0.5"), DDflt_0_1(HIST_DEFAULT_BASE_SEL_FOR_LIKE_WILDCARD, "0.50"), DDui1__(HIST_DEFAULT_NUMBER_OF_INTERVALS, "50"), DDui1__(HIST_DEFAULT_SAMPLE_MAX, "1000000"), DDui1__(HIST_DEFAULT_SAMPLE_MIN, "10000"), DDflt_0_1(HIST_DEFAULT_SAMPLE_RATIO, "0.01"), DDflte_(HIST_DEFAULT_SEL_FOR_BOOLEAN, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_IS_NULL, "0.01"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_EQUAL, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_RANGE, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_NO_WILDCARD,"1.0"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_WILDCARD, "0.10"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_EQUAL, "0.01"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_RANGE, "0.3333"), // control the amount of data in each partition of the persistent sample tble. 
DDflt1_(HIST_FETCHCOUNT_SCRATCH_VOL_THRESHOLD, "10240000"), DDkwd__(HIST_FREQ_VALS_NULL_FIX, "ON"), DDkwd__(HIST_INCLUDE_SKEW_FOR_NON_INNER_JOIN, "ON"), DDkwd__(HIST_INTERMEDIATE_REDUCTION, "OFF"), DDflt0_(HIST_INTERMEDIATE_REDUCTION_FUDGE_FACTOR, "0.25"), DDflt_0_1(HIST_JOIN_CARD_LOWBOUND, "1.0"), DDui1__(HIST_LOW_UEC_THRESHOLD, "55"), DDui1__(HIST_MAX_NUMBER_OF_INTERVALS, "10000"), DDkwd__(HIST_MC_STATS_NEEDED, "ON"), DDkwd__(HIST_MERGE_FREQ_VALS_FIX, "ON"), // Histogram min/max optimization: when the predicate is of form // T.A = MIN/MAX(S.B), replace the histogram(T.A) with // single_int_histogram(MIN/MAX(S.B)). Do this only when // there is no local predicate on S and there exists a frequent // value that is equals to MIN/MAX(S.B). DDkwd__(HIST_MIN_MAX_OPTIMIZATION, "ON"), // This CQD is used to control the number of missing stats warnings // that should be generated. // 0 ? Display no warnings. // 1 ? Display only missing single column stats warnings. These include 6008 and 6011 // 2 ? Display all single column missing stats warnings and // multi-column missing stats warnings for Scans only. // 3 ? Display all missing single column stats warnings and missing // multi-column stats warnings for Scans and Join operators only.. // 4 ? Display all missing single column stats and missing multi-column // stats warnings for all operators including Scans, Joins and GroupBys. // The CQD also does not have an impact on the auto update stats behavior. The stats will // still be automatically generated even if the warnings have been suppressed. // USTAT_AUTO_MISSING_STATS_LEVEL. // Default behavior is to generate all warnings XDDui___(HIST_MISSING_STATS_WARNING_LEVEL, "4"), DDflt1_(HIST_NO_STATS_ROWCOUNT, "100"), DDflt1_(HIST_NO_STATS_UEC, "2"), DDflt1_(HIST_NO_STATS_UEC_CHAR1, "10"), DDui1__(HIST_NUM_ADDITIONAL_DAYS_TO_EXTRAPOLATE, "4"), DDintN1__(HIST_ON_DEMAND_STATS_SIZE, "0"), DDui___(HIST_OPTIMISTIC_CARD_OPTIMIZATION, "1"), XDDkwd__(HIST_PREFETCH, "ON"), XDDkwd__(HIST_REMOVE_TRAILING_BLANKS, "ON"), // should remove after verifying code is solid DDansi_(HIST_ROOT_NODE, ""), XDDflt1_(HIST_ROWCOUNT_REQUIRING_STATS, "500"), DDflt0_(HIST_SAME_TABLE_PRED_REDUCTION, "0.0"), DD_____(HIST_SCRATCH_VOL, ""), // control the amount of data in each partition of the sample tble. 
DDflt1_(HIST_SCRATCH_VOL_THRESHOLD, "10240000"), DDflt_0_1(HIST_SKEW_COST_ADJUSTMENT, "0.2"), DDkwd__(HIST_SKIP_MC_FOR_NONKEY_JOIN_COLUMNS, "OFF"), DDui___(HIST_TUPLE_FREQVAL_LIST_THRESHOLD, "40"), DDkwd__(HIST_USE_HIGH_FREQUENCY_INFO, "ON"), XDDkwd__(HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION , "ON"), // CQDs for Trafodion on Hive // Main ones to use: // HIVE_MAX_STRING_LENGTH_IN_BYTES: Hive "string" data type gets converted // into a VARCHAR with this length // HIVE_MIN_BYTES_PER_ESP_PARTITION: Make one ESP for this many bytes // HIVE_NUM_ESPS_PER_DATANODE: Equivalent of MAX_ESPS_PER_CPU_PER_OP // Note that this is really per SeaQuest node DD_____(HIVE_CATALOG, ""), DDkwd__(HIVE_DATA_MOD_CHECK, "ON"), DDkwd__(HIVE_DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_UTF8), DD_____(HIVE_DEFAULT_SCHEMA, "HIVE"), DD_____(HIVE_FILE_CHARSET, ""), DD_____(HIVE_FILE_NAME, "/user/trafodion/hive/tpcds/customer/customer.dat" ), DD_____(HIVE_HDFS_STATS_LOG_FILE, ""), DDui___(HIVE_INSERT_ERROR_MODE, "1"), DDint__(HIVE_LIB_HDFS_PORT_OVERRIDE, "-1"), DDint__(HIVE_LOCALITY_BALANCE_LEVEL, "0"), DDui___(HIVE_MAX_ESPS, "9999"), DDui___(HIVE_MAX_STRING_LENGTH_IN_BYTES, "32000"), DDkwd__(HIVE_METADATA_JAVA_ACCESS, "ON"), DDint__(HIVE_METADATA_REFRESH_INTERVAL, "0"), DDflt0_(HIVE_MIN_BYTES_PER_ESP_PARTITION, "67108864"), DDkwd__(HIVE_NO_REGISTER_OBJECTS, "OFF"), DDui___(HIVE_NUM_ESPS_PER_DATANODE, "2"), DDpct__(HIVE_NUM_ESPS_ROUND_DEVIATION, "34"), DDint__(HIVE_SCAN_SPECIAL_MODE, "0"), DDkwd__(HIVE_SORT_HDFS_HOSTS, "ON"), DDkwd__(HIVE_USE_EXT_TABLE_ATTRS, "ON"), DD_____(HIVE_USE_FAKE_SQ_NODE_NAMES, "" ), DDkwd__(HIVE_USE_FAKE_TABLE_DESC, "OFF"), DDkwd__(HIVE_USE_HASH2_AS_PARTFUNCION, "ON"), DDkwd__(HIVE_VIEWS, "ON"), // ------------------------------------------------------------------------- DDui2__(HJ_BUFFER_SIZE, "32"), DDflt0_(HJ_CPUCOST_INITIALIZE, "1."), DDui1__(HJ_INITIAL_BUCKETS_PER_CLUSTER, "4."), DDkwd__(HJ_NEW_MCSB_PLAN, "OFF"), DDint__(HJ_SCAN_TO_NJ_PROBE_SPEED_RATIO, "2000"), DDkwd__(HJ_TYPE, "HYBRID"), DD_____(HP_ROUTINES_SCHEMA, "NEO.HP_ROUTINES"), // Must be in form <cat>.<sch> DDkwd__(HQC_CONVDOIT_DISABLE_NUMERIC_CHECK, "OFF"), DDkwd__(HQC_LOG, "OFF"), DD_____(HQC_LOG_FILE, ""), DDui1_10(HQC_MAX_VALUES_PER_KEY, "5"), DDkwd__(HYBRID_QUERY_CACHE, "ON"), DDkwd__(IF_LOCKED, "WAIT"), // ignore_duplicate_keys is no more valid. It is still // here as dummy for compatibility with existing scripts. DDkwd__(IGNORE_DUPLICATE_KEYS, "SYSTEM"), // in mode_special_1, duplicate rows are ignored if inserting a row in the // base table which has a user defined primary key. If this default is set // to OFF in mode_special_1, then duplicate rows are not ignored. // // If not in mode_special_1, and this default is ON, then duplicate rows // are ignored. DDkwd__(IGNORE_DUPLICATE_ROWS, "SYSTEM"), DDkwd__(IMPLICIT_DATETIME_INTERVAL_HOSTVAR_CONVERSION, "FALSE"), DDkwd__(IMPLICIT_HOSTVAR_CONVERSION, "FALSE"), // threshold for the number of rows inserted into a volatile/temp // table which will cause an automatic update stats. // -1 indicates do not upd stats. 0 indicates always upd stats. 
DDint__(IMPLICIT_UPD_STATS_THRESHOLD, "-1"), //"10000"), DDkwd__(INCORPORATE_SKEW_IN_COSTING, "ON"), DDkwd__(INDEX_ELIMINATION_LEVEL, "AGGRESSIVE"), DDui1__(INDEX_ELIMINATION_THRESHOLD, "50"), DDkwd__(INDEX_HINT_WARNINGS, "ON"), SDDkwd__(INFER_CHARSET, "OFF"), // UDF initial row cost CQDs DDui___(INITIAL_UDF_CPU_COST, "100"), DDui___(INITIAL_UDF_IO_COST, "1"), DDui___(INITIAL_UDF_MSG_COST, "2"), DDkwd__(INPUT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), // SQLCHARSETSTRING_UTF8 XDDkwd__(INSERT_VSBB, "SYSTEM"), //10-040621-7139-begin //This CDQ will alllow the user to force the compiler to //choose an interactive access path. ie., prefer access path with //index in it. If such a path is not found which ever access path is //available is chosen. DDkwd__(INTERACTIVE_ACCESS, "OFF"), //10-040621-7139-end DDkwd__(IN_MEMORY_OBJECT_DEFN, "OFF"), DDflte_(IO_SEEKS_INORDER_FACTOR, "0.10"), // History: // 3/11/99 Changed to zero because in large tables the read-ahead // seems negligible (and/or hard to simulate) // Before 3/11/99: 0.58 DDflt0_(IO_TRANSFER_COST_PREFETCH_MISSES_FRACTION, "0."), XDDkwd__(ISOLATION_LEVEL, "READ_COMMITTED"), XDDkwd__(ISOLATION_LEVEL_FOR_UPDATES, "NONE"), SDDkwd__(ISO_MAPPING, (char *)SQLCHARSETSTRING_ISO88591), DDkwd__(IS_DB_TRANSPORTER, "OFF"), DDkwd__(IS_SQLCI, "FALSE"), DDkwd__(IUD_NONAUDITED_INDEX_MAINT, "OFF"), DDkwd__(JDBC_PROCESS, "FALSE"), // Force the join order given by the user XDDkwd__(JOIN_ORDER_BY_USER, "OFF"), DDkwd__(KEYLESS_NESTED_JOINS, "OFF"), XDDkwd__(LAST0_MODE, "OFF"), DDansi_(LDAP_USERNAME, ""), // Disallow/Allow left joins in MultiJoin framework DDkwd__(LEFT_JOINS_SPOIL_JBB, "OFF"), DDkwd__(LIMIT_HBASE_SCAN_DOP, "OFF"), // if this default is set to ON, then the max precision of a numeric // expression(arithmetic, aggregate) is limited to MAX_NUMERIC_PRECISION // (= 18). If this is set to OFF, the default value, then the max precision // is computed based on the operands and the operation which could make the // result a software datatype(BIGNUM). Software datatypes give better // precision but degraded performance. SDDkwd__(LIMIT_MAX_NUMERIC_PRECISION, "SYSTEM"), // Size in bytes used to perform garbage collection to lob data file // default size is 5GB . Change to adjust disk usage. If 0 it means // don't do GC DDint__(LOB_GC_LIMIT_SIZE, "5000"), DDint__(LOB_HDFS_PORT, "0"), DD_____(LOB_HDFS_SERVER, "default"), DDint__(LOB_INPUT_LIMIT_FOR_BATCH, "4096"), // Size of memoryin Megabytes used to perform I/O to lob data file // default size is 512MB . Change to adjust memory usage. DDint__(LOB_MAX_CHUNK_MEM_SIZE, "512"), // default size is 10 G (10000 M) DDint__(LOB_MAX_SIZE, "10000"), // (unused)default size is 32000. Change this to extract more data into memory. DDui___(LOB_OUTPUT_SIZE, "32000"), DD_____(LOB_STORAGE_FILE_DIR, "/user/trafodion/lobs"), // storage types defined in exp/ExpLOBenum.h. 
// Default is hdfs_file (value = 2) DDint__(LOB_STORAGE_TYPE, "2"), //New default size for buffer size for local node DDui2__(LOCAL_MESSAGE_BUFFER_SIZE, "50"), DDansi_(MAINTAIN_CATALOG, "NEO"), // Set the maintain control table timeout to 5 minutes DDint__(MAINTAIN_CONTROL_TABLE_TIMEOUT, "30000"), DDint__(MAINTAIN_REORG_PRIORITY, "-1"), DDint__(MAINTAIN_REORG_PRIORITY_DELTA, "0"), DDint__(MAINTAIN_REORG_RATE, "40"), DDint__(MAINTAIN_REORG_SLACK, "0"), DDint__(MAINTAIN_UPD_STATS_SAMPLE, "-1"), DDkwd__(MARIAQUEST_PROCESS, "OFF"), DDSint__(MASTER_PRIORITY, "0"), DDSint__(MASTER_PRIORITY_DELTA, "0"), DDint__(MATCH_CONSTANTS_OF_EQUALITY_PREDICATES, "2"), DDui1__(MAX_ACCESS_NODES_PER_ESP, "1024"), // this is the default length of a param which is typed as a VARCHAR. DDui2__(MAX_CHAR_PARAM_DEFAULT_SIZE, "32"), DDint__(MAX_DEPTH_TO_CHECK_FOR_CYCLIC_PLAN, "1"), // default value of maximum dp2 groups for a hash-groupby DDui1__(MAX_DP2_HASHBY_GROUPS, "1000"), // // The max number of ESPs per cpu for a given operator. // i.e. this number times the number of available CPUs is "max pipelines". // // On Linux, "CPU" means cores. // DDflt__(MAX_ESPS_PER_CPU_PER_OP, "0.5"), DDui1__(MAX_EXPRS_USED_FOR_CONST_FOLDING, "1000"), // used in hash groupby costing in esp/master DDui1__(MAX_HEADER_ENTREIS_PER_HASH_TABLE, "250000"), DDui1__(MAX_LONG_VARCHAR_DEFAULT_SIZE, "2000"), DDui1__(MAX_LONG_WVARCHAR_DEFAULT_SIZE, "2000"), DD18_128(MAX_NUMERIC_PRECISION_ALLOWED, "128"), // The max number of vertical partitions for optimization to be done under // a VPJoin. DDui___(MAX_NUM_VERT_PARTS_FOR_OPT, "20"), DDui1__(MAX_ROWS_LOCKED_FOR_STABLE_ACCESS, "1"), // The max number of skewed values detected - skew buster DDui1__(MAX_SKEW_VALUES_DETECTED, "10000"), // multi-column skew inner table broadcast threashold in bytes (=1 MB) DDui___(MC_SKEW_INNER_BROADCAST_THRESHOLD, "1000000"), // multi-column skew sensitivity threshold // // For new MCSB (that is, we utilize MC skews directly), // apply the MC skew buster when // frequency of MC skews > MC_SKEW_SENSITIVITY_THRESHOLD / count_of_cpus // // For old MCSB (that is, we guess MC skews from SC skews), // apply the MC skew buster when // SFa,b... * countOfPipeline > MC_SKEW_SENSITIVITY_THRESHOLD // SFa,b ... is the skew factor for multi column a,b,... // XDDflt__(MC_SKEW_SENSITIVITY_THRESHOLD, "0.1"), // Applies additional heuristics to turn off MDAM. This code was added // at a time when cumulative probe cost was not considered in the MDAM // cost model (a MAJOR error). The meaning of the values: // 0 - do nothing (no additional heuristics) // 1 - require that more key columns have predicates than key columns // without predicates // 2 - require that the UECs of key columns lacking predicates is below // a given threshold // 3 - AND of 1 and 2 // These heuristics made sense when the cost model was broken and we // traversed more deeply into the key than we should have. The cost model // now takes cumulative probe cost into account so these should no longer // be necessary (and indeed these heuristics can cause us to miss some // very good plans, because they prevent us from considering MDAM plans // that probe prefix columns before a column with high UEC, for example). DDui___(MDAM_APPLY_RESTRICTION_CHECK, "0"), DDflt0_(MDAM_CPUCOST_NET_OVH, "2000."), // The cost that takes to build the mdam network per predicate: // (we assume that the cost to build the mdam network is a linear function // of the key predicates) DDflt0_(MDAM_CPUCOST_NET_PER_PRED, ".5"), // controls the max. 
number of seek positions under which MDAM will be // allowed. Set it to 0 turns off the feature. XDDui___(MDAM_NO_STATS_POSITIONS_THRESHOLD, "10"), // a multiplier of probe cost used for MDAM DDflt0_(MDAM_PROBE_TAX, "3"), // MDAM_SCAN_METHOD ON means MDAM is enabled, // OFF means MDAM is disabled. MDAM is enabled by default // externalized 06/21/01 RV // mdam off on open source at this point XDDkwd__(MDAM_SCAN_METHOD, "ON"), DDflt0_(MDAM_SELECTION_DEFAULT, "0.5"), DDflt0_(MDAM_TOTAL_UEC_CHECK_MIN_RC_THRESHOLD, "10000"), DDflt0_(MDAM_TOTAL_UEC_CHECK_UEC_THRESHOLD, "0.2"), DDkwd__(MDAM_TRACING, "OFF"), // controls the max. number of probes at which MDAM under NJ plan will be // generated. Set it to 0 turns off the feature. XDDui___(MDAM_UNDER_NJ_PROBES_THRESHOLD, "0"), // controls the amount of penalty for CPU resource required that is // beyond the value specified by MDOP_CPUS_SOFT_LIMIT. The number of extra CPUs // actually allocated is computed as the origial value divided by the CQD. // If the CQD is set to 1 (default), then there is no penalty. DDflt1_(MDOP_CPUS_PENALTY, "70"), // specify the limit beyond which the number of CPUs will be limited. DDui1__(MDOP_CPUS_SOFT_LIMIT, "64"), // controls the amount of penalty for CPU resource per memory unit // required that is beyond the value specified by MDOP_CPUS_SOFT_LIMIT. // The number of extra CPUs actually allocated is computed as the // origial value divided by the CQD. DDflt1_(MDOP_MEMORY_PENALTY, "70"), // CQD to test/enforce heap memory upper limits // values are in KB DDui___(MEMORY_LIMIT_CMPCTXT_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_CMPSTMT_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_HISTCACHE_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_NATABLECACHE_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_QCACHE_UPPER_KB, "0"), // SQL/MX Compiler/Optimzer Memory Monitor. DDkwd__(MEMORY_MONITOR, "OFF"), DDui1__(MEMORY_MONITOR_AFTER_TASKS, "30000"), DDkwd__(MEMORY_MONITOR_IN_DETAIL, "OFF"), DD_____(MEMORY_MONITOR_LOGFILE, "NONE"), DDkwd__(MEMORY_MONITOR_LOG_INSTANTLY, "OFF"), DDui1__(MEMORY_MONITOR_TASK_INTERVAL, "5000"), // Hash join currently uses 20 Mb before it overflows, use this // as the limit DDui1__(MEMORY_UNITS_SIZE, "20480"), // amount of memory available per CPU for any query SDDflte_(MEMORY_UNIT_ESP, "300"), DDflt1_(MEMORY_USAGE_NICE_CONTEXT_FACTOR, "1"), DDflt1_(MEMORY_USAGE_OPT_PASS_FACTOR, "1.5"), DDui1__(MEMORY_USAGE_SAFETY_NET, "500"), // MERGE_JOINS ON means do MERGE_JOINS XDDkwd__(MERGE_JOINS, "ON"), DDkwd__(MERGE_JOIN_ACCEPT_MULTIPLE_NJ_PROBES, "ON"), DDkwd__(MERGE_JOIN_CONTROL, "OFF"), DDkwd__(MERGE_JOIN_WITH_POSSIBLE_DEADLOCK, "OFF"), // controls if merge/upsert is supported on table with a unique index DDkwd__(MERGE_WITH_UNIQUE_INDEX, "ON"), SDDui___(METADATA_CACHE_SIZE, "20"), DDkwd__(METADATA_STABLE_ACCESS, "OFF"), //------------------------------------------------------------------- // Minimum ESP parallelism. If the user does not specify this value // (default value 0 does not change) then the number of segments // (totalNumCPUs/16, where totalNumCPUs=gpClusterInfo->numOfSMPs()) // will be used as the value of minimum ESP parallelism. If user sets // this value it should be integer between 1 and totalNumCPUs. 
In // this case actual value of minimum ESP parallelism will be // min(CDQ value, MDOP), where MDOP (maximum degree of parallelism) // is defined by adaptive segmentation //------------------------------------------------------------------- DDui___(MINIMUM_ESP_PARALLELISM, "0"), DDui1__(MIN_LONG_VARCHAR_DEFAULT_SIZE, "1"), DDui1__(MIN_LONG_WVARCHAR_DEFAULT_SIZE, "1"), DDkwd__(MIN_MAX_OPTIMIZATION, "ON"), DDpct__(MJ_BMO_QUOTA_PERCENT, "0"), DDflt0_(MJ_CPUCOST_ALLOCATE_LIST, ".05"), DDflt0_(MJ_CPUCOST_CLEAR_LIST, ".01"), DDflt0_(MJ_CPUCOST_GET_NEXT_ROW_FROM_LIST, ".01"), // calibrated 01/16/98: // 01/13/98 40000., this did not work with small tables // Before 01/13/98: 0.5 DDflt0_(MJ_CPUCOST_INITIALIZE, "1."), // Before 03/12/98: 0.4 // Before 01/13/98: 0.01 DDflt0_(MJ_CPUCOST_INSERT_ROW_TO_LIST, ".0001"), DDflt0_(MJ_CPUCOST_REWIND_LIST, ".01"), DDflte_(MJ_LIST_NODE_SIZE, ".01"), DDkwd__(MJ_OVERFLOW, "ON"), DDkwd__(MODE_SEABASE, "ON"), DDkwd__(MODE_SEAHIVE, "ON"), SDDkwd__(MODE_SPECIAL_1, "OFF"), DDkwd__(MODE_SPECIAL_4, "OFF"), DDflt0_(MSCF_CONCURRENCY_IO, "0.10"), DDflt0_(MSCF_CONCURRENCY_MSG, "0.10"), // Tests suggest that RELEASE is about 2.5 times faster than DEBUG // RELEASE is always faster than DEBUG code so this default must be // at least one. DDflt1_(MSCF_DEBUG_TO_RELEASE_MULTIPLIER, "2.5"), // MSCF_ET_CPU units are seconds/thousand of CPU instructions // History: // Before 02/01/99, the speed was calibrated for debug, now its is for // release: 0.00005 DDflte_(MSCF_ET_CPU, "0.000014"), // was 0.00002 12/2k // MSCF_ET_IO_TRANSFER units are seconds/Kb // History // Changed to '0.000455' to reflect new calibration data // Before 03/11/99 "0.000283" DDflte_(MSCF_ET_IO_TRANSFER, "0.00002"), // Assume time to transfer a KB of local message is 5 times // faster than the time to transfer a KB from disk // Units of MSCF_ET_LOCAL_MSG_TRANSFER are seconds/Kb DDflte_(MSCF_ET_LOCAL_MSG_TRANSFER, "0.000046"), // $$$ This should be removed. It is only used by preliminary costing // for the materialize operator, which should not be using it. DDflte_(MSCF_ET_NM_PAGE_FAULTS, "1"), // "?" used? // : for calibration on 04/08/2004 // Seek time will be derived from disk type. // MSCF_ET_NUM_IO_SEEKS units are seconds DDflte_(MSCF_ET_NUM_IO_SEEKS, "0.0038"), // Assume sending a local message takes 1000 cpu instructions DDflte_(MSCF_ET_NUM_LOCAL_MSGS, "0.000125"), // Assume sending a remote message takes 10000 cpu instructions // DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.00125"), // Change the number of instructions to encode a remote message to be // the same as the local message DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.000125"), // Assume 1MB/second transfer rate for transferring remote message bytes // (Based on 10 Megabit/second Ethernet transfer rate) // MSCF_ET_REMOTE_MSG_TRANSFER units are kb/Sec // DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.001"), // the remote msg are 10% more costly than the local transfer // but also may depend on the physical link, so externalize it DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.00005"), // ------------------------------------------------------------------------- // Factors used for estimating overlappability of I/O and messaging used // in the calculation for overlapped addition // Assume 50% overlap for now. 
// ------------------------------------------------------------------------- DDflte_(MSCF_OV_IO, "0.5"), DDflte_(MSCF_OV_IO_TRANSFER, "0.5"), DDflte_(MSCF_OV_LOCAL_MSG_TRANSFER, "0.5"), DDflte_(MSCF_OV_MSG, "0.5"), DDflte_(MSCF_OV_NUM_IO_SEEKS, "0.5"), DDflte_(MSCF_OV_NUM_LOCAL_MSGS, "0.5"), DDflte_(MSCF_OV_NUM_REMOTE_MSGS, "0.5"), DDflte_(MSCF_OV_REMOTE_MSG_TRANSFER, "0.5"), DDui___(MSCF_SYS_DISKS, "16"), // "?" used? DDui___(MSCF_SYS_MEMORY_PER_CPU, "1"), // "?" used? DDui___(MSCF_SYS_TEMP_SPACE_PER_DISK, "50"), // "?" used? DDkwd__(MTD_GENERATE_CC_PREDS, "ON"), DDint__(MTD_MDAM_NJ_UEC_THRESHOLD, "100"), // Allow for the setting of the row count in a long running operation XDDui1__(MULTI_COMMIT_SIZE, "10000"), // try the join order specified in the queries, this will cause the // enumeration of the initial join order specified by the user // among the join orders enumerated // ** This is currently OFF by default ** DDkwd__(MULTI_JOIN_CONSIDER_INITIAL_JOIN_ORDER, "OFF"), // used in JBBSubsetAnalysis::isAStarPattern for finding lowest cost // outer subtree for NJ into fact table. DDflt0_(MULTI_JOIN_PROBE_HASH_TABLE, "0.000001"), // threshold above which a query is considered complex // this only applies to queries that can be rewritten // as Multi Joins DDint__(MULTI_JOIN_QUERY_COMPLEXITY_THRESHOLD, "5120"), // threshold above which a query is considered to do // a lot of work his only applies to queries that can be // rewritten as Multi Joins DDflt__(MULTI_JOIN_QUERY_WORK_THRESHOLD, "0"), SDDint__(MULTI_JOIN_THRESHOLD, "3"), DDint__(MULTI_PASS_JOIN_ELIM_LIMIT, "5"), DDflt0_(MU_CPUCOST_INITIALIZE, ".05"), DDui___(MU_INITIAL_BUFFER_COUNT, "5."), DDflte_(MU_INITIAL_BUFFER_SIZE, "1033.7891"), //-------------------------------------------------------------------------- //++ MV XDDkwd__(MVGROUP_AUTOMATIC_CREATION, "ON"), DDkwd__(MVQR_ALL_JBBS_IN_QD, "OFF"), #ifdef NDEBUG DDkwd__(MVQR_ENABLE_LOGGING, "OFF"), // No logging by default for release #else DDkwd__(MVQR_ENABLE_LOGGING, "ON"), #endif DD_____(MVQR_FILENAME_PREFIX, "/usr/tandem/sqlmx/log"), DDkwd__(MVQR_LOG_QUERY_DESCRIPTORS, "OFF"), DDint__(MVQR_MAX_EXPR_DEPTH, "20"), DDint__(MVQR_MAX_EXPR_SIZE, "100"), DDint__(MVQR_MAX_MV_JOIN_SIZE, "10"), DDkwd__(MVQR_PARAMETERIZE_EQ_PRED, "ON"), DDkwd__(MVQR_PRIVATE_QMS_INIT, "SMD"), DDansi_(MVQR_PUBLISH_TABLE_LOCATION, ""), DDkwd__(MVQR_PUBLISH_TO, "BOTH"), DDansi_(MVQR_REWRITE_CANDIDATES, ""), XDDkwd__(MVQR_REWRITE_ENABLED_OPTION, "OFF"), // @ZX -- change to ON later XDDui0_5(MVQR_REWRITE_LEVEL, "0"), XDDkwd__(MVQR_REWRITE_SINGLE_TABLE_QUERIES, "ON"), DDkwd__(MVQR_USE_EXTRA_HUB_TABLES, "ON"), DDkwd__(MVQR_USE_RI_FOR_EXTRA_HUB_TABLES, "OFF"), DD_____(MVQR_WORKLOAD_ANALYSIS_MV_NAME, ""), XDDMVA__(MV_AGE, "0 MINUTES"), XDDkwd__(MV_ALLOW_SELECT_SYSTEM_ADDED_COLUMNS, "OFF"), DDkwd__(MV_AS_ROW_TRIGGER, "OFF"), DDkwd__(MV_AUTOMATIC_LOGGABLE_COLUMN_MAINTENANCE, "ON"), DDkwd__(MV_DUMP_DEBUG_INFO, "OFF"), DDkwd__(MV_ENABLE_INTERNAL_REFRESH_SHOWPLAN, "OFF"), DDui___(MV_LOG_CLEANUP_SAFETY_FACTOR, "200"), DDui___(MV_LOG_CLEANUP_USE_MULTI_COMMIT, "1"), SDDkwd__(MV_LOG_PUSH_DOWN_DP2_DELETE, "OFF"), // push down mv logging tp dp2 for delete SDDkwd__(MV_LOG_PUSH_DOWN_DP2_INSERT, "OFF"), // push down mv logging tp dp2 for insert SDDkwd__(MV_LOG_PUSH_DOWN_DP2_UPDATE, "ON"), // push down mv logging tp dp2 for update SDDui___(MV_REFRESH_MAX_PARALLELISM, "0"), DDui___(MV_REFRESH_MAX_PIPELINING, "0"), DDint__(MV_REFRESH_MDELTA_MAX_DELTAS_THRESHOLD, "31"), DDint__(MV_REFRESH_MDELTA_MAX_JOIN_SIZE_FOR_SINGLE_PHASE, "3"), 
DDint__(MV_REFRESH_MDELTA_MIN_JOIN_SIZE_FOR_SINGLE_PRODUCT_PHASE, "8"), DDint__(MV_REFRESH_MDELTA_PHASE_SIZE_FOR_MID_RANGE, "6"), DDkwd__(MV_TRACE_INCONSISTENCY, "OFF"), DDSint__(MXCMP_PRIORITY, "0"), DDSint__(MXCMP_PRIORITY_DELTA, "0"), DDkwd__(NAMETYPE, "ANSI"), DDkwd__(NAR_DEPOBJ_ENABLE, "ON"), DDkwd__(NAR_DEPOBJ_ENABLE2, "ON"), // NATIONAL_CHARSET reuses the "kwd" logic here, w/o having to add any // DF_ token constants (this can be considered either clever or kludgy coding). DDkwd__(NATIONAL_CHARSET, (char *)SQLCHARSETSTRING_UNICODE), // These CQDs are reserved for NCM. These are mostly used for // internal testing, turning on/off features for debugging, and for tuning. // In normal situations, these will not be externalized in keeping // with the very few CQDs philosophy of NCM. // These are applicable only in conjunction with SIMPLE_COST_MODEL 'on'. DDflt__(NCM_CACHE_SIZE_IN_BLOCKS, "52"), DDflt__(NCM_COSTLIMIT_FACTOR, "0.05"), //change to 0.05 DDint__(NCM_ESP_FIXUP_WEIGHT, "300"), DDkwd__(NCM_ESP_STARTUP_FIX, "ON"), DDflt__(NCM_EXCH_MERGE_FACTOR, "0.10"), // change to 0.10 DDkwd__(NCM_EXCH_NDCS_FIX, "ON"), // change to ON DDkwd__(NCM_HBASE_COSTING, "ON"), // change to ON DDkwd__(NCM_HGB_OVERFLOW_COSTING, "ON"), DDkwd__(NCM_HJ_OVERFLOW_COSTING, "ON"), DDflt__(NCM_IND_JOIN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_JOIN_SELECTIVITY, "1.0"), DDflt__(NCM_IND_SCAN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_SCAN_SELECTIVITY, "1.0"), DDflt__(NCM_MAP_CPU_FACTOR, "4.0"), DDflt__(NCM_MAP_MSG_FACTOR, "4.0"), DDflt__(NCM_MAP_RANDIO_FACTOR, "4.0"), DDflt__(NCM_MAP_SEQIO_FACTOR, "4.0"), DDflt__(NCM_MDAM_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_MJ_TO_HJ_FACTOR, "0.6"), DDflt__(NCM_NJ_PC_THRESHOLD, "1.0"), DDflt0_(NCM_NJ_PROBES_MAXCARD_FACTOR, "10000"), DDkwd__(NCM_NJ_SEQIO_FIX, "ON"), // change to ON DDint__(NCM_NUM_SORT_RUNS, "4"), DDflt__(NCM_OLTP_ET_THRESHOLD, "60.0"), DDflt__(NCM_PAR_ADJ_FACTOR, "0.10"), DDkwd__(NCM_PAR_GRPBY_ADJ, "ON"), DDkwd__(NCM_PRINT_ROWSIZE, "OFF"), DDflt__(NCM_RAND_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_RAND_IO_WEIGHT, "3258"), DDflt__(NCM_SEQ_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_SEQ_IO_WEIGHT, "543"), DDflt__(NCM_SERIAL_NJ_FACTOR, "2"), DDflt__(NCM_SGB_TO_HGB_FACTOR, "0.8"), DDkwd__(NCM_SKEW_COST_ADJ_FOR_PROBES, "OFF"), DDkwd__(NCM_SORT_OVERFLOW_COSTING, "ON"), DDflt__(NCM_TUPLES_ROWSIZE_FACTOR, "0.5"), DDflt__(NCM_UDR_NANOSEC_FACTOR, "0.01"), DDkwd__(NCM_USE_HBASE_REGIONS, "ON"), // NESTED_JOINS ON means do NESTED_JOINS XDDkwd__(NESTED_JOINS, "ON"), // max. number of ESPs that will deal with skews for OCR // 0 means to turn off the feature DDintN1__(NESTED_JOINS_ANTISKEW_ESPS , "16"), DDkwd__(NESTED_JOINS_CHECK_LEADING_KEY_SKEW, "OFF"), DDkwd__(NESTED_JOINS_FULL_INNER_KEY, "OFF"), DDkwd__(NESTED_JOINS_KEYLESS_INNERJOINS, "ON"), DDui1__(NESTED_JOINS_LEADING_KEY_SKEW_THRESHOLD, "15"), DDkwd__(NESTED_JOINS_NO_NSQUARE_OPENS, "ON"), DDkwd__(NESTED_JOINS_OCR_GROUPING, "OFF"), // 128X32 being the default threshold for OCR. // 128 partitions per table and 32 ESPs per NJ operator SDDint__(NESTED_JOINS_OCR_MAXOPEN_THRESHOLD, "4096"), // PLAN0 is solely controlled by OCR. If this CQD is off, then // PLAN0 is off unconditionally. This CQD is used by OCR unit test. DDkwd__(NESTED_JOINS_PLAN0, "ON"), // try the explicit sort plan when plan2 produces a non-sort plan DDkwd__(NESTED_JOINS_PLAN3_TRY_SORT, "ON"), // Enable caching for eligible nested joins - see NestedJoin::preCodeGen. 
DDkwd__(NESTED_JOIN_CACHE, "ON"), // Enable pulling up of predicates into probe cache DDkwd__(NESTED_JOIN_CACHE_PREDS, "ON"), // Nested Join Heuristic DDkwd__(NESTED_JOIN_CONTROL, "ON"), // Allow nested join for cross products DDkwd__(NESTED_JOIN_FOR_CROSS_PRODUCTS, "ON"), DDkwd__(NEW_MDAM, "ON"), DDkwd__(NEW_OPT_DRIVER, "ON"), // Ansi name of the next DEFAULTS table to read in. // Contains blanks, or the name of a DEFAULTS table to read values from next, // after reading all values from this DEFAULTS table. The name may contain // format strings of '%d' and '%u', which are replaced with the domain name // and user name, respectively, of the current user. The name may begin with // '$', in which it is replaced by its value as a SYSTEM environment variable. // This value in turn may contain '%d' and '%u' formats. When these // replacements are complete, the resulting name is qualified by the current // default catalog and schema, if necessary, and the resulting three-part ANSI // table's default values are read in. This table may contain another // NEXT_DEFAULTS_TABLE value, and different default CATALOG and // SCHEMA values to qualify the resulting table name, and so on, allowing a // chain of tables to be read; combined with the format and environment // variable replacements, this allows per-domain, per-system, and per-user // customization of SQL/MX default values. DDansi_(NEXT_DEFAULTS_TABLE, ""), DDui1__(NEXT_VALUE_FOR_BUFFER_SIZE, "10240"), DDui1__(NEXT_VALUE_FOR_NUM_BUFFERS, "3"), DDui1__(NEXT_VALUE_FOR_SIZE_DOWN, "4"), DDui1__(NEXT_VALUE_FOR_SIZE_UP, "2048"), DDflt0_(NJ_CPUCOST_INITIALIZE, ".1"), DDflt0_(NJ_CPUCOST_PASS_ROW, ".02"), DDflte_(NJ_INC_AFTERLIMIT, "0.0055"), DDflte_(NJ_INC_MOVEROWS, "0.0015"), DDflte_(NJ_INC_UPTOLIMIT, "0.0225"), DDui___(NJ_INITIAL_BUFFER_COUNT, "5"), DDui1__(NJ_INITIAL_BUFFER_SIZE, "5"), DDui1__(NJ_MAX_SEEK_DISTANCE, "5000"), // UDF costing CQDs for processing a steady state row DDui___(NORMAL_UDF_CPU_COST, "100"), DDui___(NORMAL_UDF_IO_COST, "0"), DDui___(NORMAL_UDF_MSG_COST, "2"), XDDui30_32000(NOT_ATOMIC_FAILURE_LIMIT, "32000"), //NOT IN ANSI NULL semantics rule DDkwd__(NOT_IN_ANSI_NULL_SEMANTICS, "ON"), //NOT IN optimization DDkwd__(NOT_IN_OPTIMIZATION, "ON"), //NOT IN outer column optimization DDkwd__(NOT_IN_OUTER_OPTIMIZATION, "ON"), // NOT IN skew buster optimization DDkwd__(NOT_IN_SKEW_BUSTER_OPTIMIZATION, "ON"), DDkwd__(NOT_NULL_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(NOWAITED_FIXUP_MESSAGE_TO_DP2, "OFF"), // NSK DEBUG defaults DDansi_(NSK_DBG, "OFF"), DDansi_(NSK_DBG_COMPILE_INSTANCE, "USER"), DDkwd__(NSK_DBG_GENERIC, "OFF"), DDansi_(NSK_DBG_LOG_FILE, ""), DDkwd__(NSK_DBG_MJRULES_TRACKING, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_INPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_OUTPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONSTRAINT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT_POINTER, "OFF"), DDkwd__(NSK_DBG_PRINT_COST, "OFF"), DDkwd__(NSK_DBG_PRINT_COST_LIMIT, "OFF"), DDkwd__(NSK_DBG_PRINT_INDEX_ELIMINATION, "OFF"), DDkwd__(NSK_DBG_PRINT_ITEM_EXPR, "OFF"), DDkwd__(NSK_DBG_PRINT_LOG_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_PHYS_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK_STACK, "OFF"), DDkwd__(NSK_DBG_QUERY_LOGGING_ONLY, "OFF"), DDansi_(NSK_DBG_QUERY_PREFIX, ""), DDkwd__(NSK_DBG_SHOW_PASS1_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PASS2_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PLAN_LOG, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_ANALYSIS, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_BINDING, "OFF"), 
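  // NOTE (illustrative only): a hypothetical example of the NEXT_DEFAULTS_TABLE
  // name resolution described earlier in this table. None of the names below
  // are real settings; they exist only for this sketch:
  //   NEXT_DEFAULTS_TABLE value: '$SITE_DEFAULTS_%u'
  //   '%u' is replaced by the user name, say JSMITH -> '$SITE_DEFAULTS_JSMITH'
  //   the leading '$' means the remainder is looked up as a SYSTEM environment
  //   variable; suppose SITE_DEFAULTS_JSMITH = 'DEFAULTS_%d' and the domain
  //   name is HR -> 'DEFAULTS_HR'.
  //   The result is then qualified by the current default catalog and schema,
  //   e.g. MYCAT.MYSCH.DEFAULTS_HR, and that table's defaults are read next;
  //   it may itself name another NEXT_DEFAULTS_TABLE, continuing the chain.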
DDkwd__(NSK_DBG_SHOW_TREE_AFTER_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_NORMALIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PARSING, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PRE_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_SEMANTIC_QUERY_OPTIMIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_TRANSFORMATION, "OFF"), DDkwd__(NSK_DBG_STRATEGIZER, "OFF"), DDflt0_(NUMBER_OF_PARTITIONS_DEVIATION, "0.25"), DDui1__(NUMBER_OF_ROWS_PARALLEL_THRESHOLD, "5000"), DDui1__(NUMBER_OF_USERS, "1"), DDui1__(NUM_OF_BLOCKS_PER_ACCESS, "SYSTEM"), DDflt0_(NUM_OF_PARTS_DEVIATION_TYPE2_JOINS, "SYSTEM"), DDkwd__(NVCI_PROCESS, "FALSE"), DDflt0_(OCB_COST_ADJSTFCTR, "0.996"), DDui___(OCR_FOR_SIDETREE_INSERT, "1"), DDkwd__(ODBC_METADATA_PROCESS, "FALSE"), DDkwd__(ODBC_PROCESS, "FALSE"), DDflte_(OHJ_BMO_REUSE_SORTED_BMOFACTOR_LIMIT, "3.0"), DDflte_(OHJ_BMO_REUSE_SORTED_UECRATIO_UPPERLIMIT, "0.7"), DDflte_(OHJ_BMO_REUSE_UNSORTED_UECRATIO_UPPERLIMIT, "0.01"), DDflte_(OHJ_VBMOLIMIT, "5.0"), DDui1__(OLAP_BUFFER_SIZE, "262144"), // Do not alter (goes to DP2) DDkwd__(OLAP_CAN_INVERSE_ORDER, "ON"), DDui1__(OLAP_MAX_FIXED_WINDOW_EXTRA_BUFFERS, "2"), DDui1__(OLAP_MAX_FIXED_WINDOW_FRAME, "50000"), DDui1__(OLAP_MAX_NUMBER_OF_BUFFERS, "100000"), DDui___(OLAP_MAX_ROWS_IN_OLAP_BUFFER, "0"), //aplies for fixed window-- number of additional oplap buffers //to allocate on top of the minumum numbers DDkwd__(OLD_HASH2_GROUPING, "FALSE"), DDkwd__(OLT_QUERY_OPT, "ON"), DDkwd__(OLT_QUERY_OPT_LEAN, "OFF"), // ----------------------------------------------------------------------- // Optimizer pruning heuristics. // ----------------------------------------------------------------------- DDkwd__(OPH_EXITHJCRCONTCHILOOP, "ON"), DDkwd__(OPH_EXITMJCRCONTCHILOOP, "ON"), DDkwd__(OPH_EXITNJCRCONTCHILOOP, "OFF"), DDkwd__(OPH_PRUNE_WHEN_COST_LIMIT_EXCEEDED, "OFF"), DDflt__(OPH_PRUNING_COMPLEXITY_THRESHOLD, "10.0"), DDflt__(OPH_PRUNING_PASS2_COST_LIMIT, "-1.0"), DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_CANDIDATES, "OFF"), DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_PASS1_SOLUTION, "ON"), DDkwd__(OPH_REUSE_FAILED_PLAN, "ON"), DDkwd__(OPH_REUSE_OPERATOR_COST, "OFF"), DDkwd__(OPH_SKIP_OGT_FOR_SHARED_GC_FAILED_CL, "OFF"), DDkwd__(OPH_USE_CACHED_ELAPSED_TIME, "ON"), DDkwd__(OPH_USE_CANDIDATE_PLANS, "OFF"), DDkwd__(OPH_USE_COMPARE_COST_THRESHOLD, "ON"), DDkwd__(OPH_USE_CONSERVATIVE_COST_LIMIT, "OFF"), DDkwd__(OPH_USE_ENFORCER_PLAN_PROMOTION, "OFF"), DDkwd__(OPH_USE_FAILED_PLAN_COST, "ON"), DDkwd__(OPH_USE_NICE_CONTEXT, "OFF"), DDkwd__(OPH_USE_ORDERED_MJ_PRED, "OFF"), DDkwd__(OPH_USE_PWS_FLAG_FOR_CONTEXT, "OFF"), XDDui___(OPI_ERROR73_RETRIES, "10"), DDflt__(OPTIMIZATION_BUDGET_FACTOR, "5000"), DDkwd__(OPTIMIZATION_GOAL, "LASTROW"), XDDkwd__(OPTIMIZATION_LEVEL, "3"), DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_1, "50"), DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_2, "0"), DDui1__(OPTIMIZATION_LEVEL_1_IMMUNITY_LIMIT, "5000"), DDui1__(OPTIMIZATION_LEVEL_1_MJENUM_LIMIT, "20"), DDui1__(OPTIMIZATION_LEVEL_1_SAFETY_NET, "30000"), DDflt__(OPTIMIZATION_LEVEL_1_SAFETY_NET_MULTIPLE, "3.0"), DDui1__(OPTIMIZATION_LEVEL_1_THRESHOLD, "1000"), DDui1__(OPTIMIZATION_TASKS_LIMIT, "2000000000"), DDui1__(OPTIMIZATION_TASK_CAP, "30000"), // Optimizer Graceful Termination: // 1=> randomProbabilistic pruning // > 1 pruning based on potential DDui1__(OPTIMIZER_GRACEFUL_TERMINATION, "2"), DDkwd__(OPTIMIZER_HEURISTIC_1, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_2, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_3, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_4, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_5, "OFF"), // Tells the 
compiler to print costing information DDkwd__(OPTIMIZER_PRINT_COST, "OFF"), // Tells the compiler to issue a warning with its internal counters DDkwd__(OPTIMIZER_PRINT_INTERNAL_COUNTERS, "OFF"), // Pruning is OFF because of bugs, turn to ON when bugs are fixed // (03/03/98) SDDkwd__(OPTIMIZER_PRUNING, "ON"), DDkwd__(OPTIMIZER_PRUNING_FIX_1, "ON"), //change to ON DDkwd__(OPTIMIZER_SYNTH_FUNC_DEPENDENCIES, "ON"), //OPTS_PUSH_DOWN_DAM made external RV 06/21/01 CR 10-010425-2440 DDui___(OPTS_PUSH_DOWN_DAM, "0"), DDkwd__(ORDERED_HASH_JOIN_CONTROL, "ON"), SDDkwd__(OR_OPTIMIZATION, "ON"), DDkwd__(OR_PRED_ADD_BLOCK_TO_IN_LIST, "ON"), DDkwd__(OR_PRED_KEEP_CAST_VC_UCS2, "ON"), // controls the jump table method of evaluating an or pred. in a scan node // 0 => feature is OFF, positive integer denotes max OR pred that will be // processed through a jump table. DDint__(OR_PRED_TO_JUMPTABLE, "2000"), // controls semijoin method of evaluating an or pred. // 0 => feature is OFF, positive number means if pred do not cover key cols // and jump table is not available, then the transformation is done if // inlist is larger than this value. DDint__(OR_PRED_TO_SEMIJOIN, "100"), // Ratio of tablesize (without application of any preds)to probes below // which semijoin trans. is favoured. DDflt0_(OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO, "0.001"), // Minimum table size beyond which semijoin trans. is considered DDint__(OR_PRED_TO_SEMIJOIN_TABLE_MIN_SIZE, "10000"), // The Optimizer Simulator (OSIM) CQDs DDkwd__(OSIM_USE_POS, "OFF"), DDint__(OSIM_USE_POS_DISK_SIZE_GB, "0"), DD_____(OSIM_USE_POS_NODE_NAMES, ""), DDui2__(OS_MESSAGE_BUFFER_SIZE, "32"), // if set to "ansi", datetime output is in ansi format. Currently only // used in special_1 mode if the caller needs datetime value in // ansi format (like, during upd stats). DDansi_(OUTPUT_DATE_FORMAT, ""), // Overflow mode for scratch files DDkwd__(OVERFLOW_MODE, "DISK"), // Sequence generator override identity values DDkwd__(OVERRIDE_GENERATED_IDENTITY_VALUES, "OFF"), // allow users to specify a source schema to be // replaced by a target schema SDDosch_(OVERRIDE_SCHEMA, ""), // Allows users to specify their own SYSKEY value. In other words // the system does not generate one for them. // Prior to this CQD, pm_regenerate_syskey_for_insert was being used // to preserve the syskey. Carrying over these comments from // pm_regenerate_syskey_for_insert // For audited target partition, PM does the copy in multiple transactions // In each transaction PM does a insert/select from the source to the target // partition. The clustering key values from the last row of a transaction // is used as begin key value for the next transaction. If the table // has a syskey then it gets regenerated and last row contains the new // value for the syskey. This obviously causes us to start at a different // place then we intended to start from. The following default when set // to off forces the engine to not regenerate syskey. DDkwd__(OVERRIDE_SYSKEY, "OFF"), DDui___(PARALLEL_ESP_NODEMASK, "0"), // by default all parallelism heuristics are switched ON. DDkwd__(PARALLEL_HEURISTIC_1, "ON"), DDkwd__(PARALLEL_HEURISTIC_2, "ON"), DDkwd__(PARALLEL_HEURISTIC_3, "ON"), DDkwd__(PARALLEL_HEURISTIC_4, "ON"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs. XDDui1__(PARALLEL_NUM_ESPS, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel ddl // operations. 
DDui1__(PARALLEL_NUM_ESPS_DDL, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel purgedata // operation. DDui1__(PARALLEL_NUM_ESPS_PD, "SYSTEM"), // is partial sort applicable; if so adjust sort cost accordingly DDflt0_(PARTIAL_SORT_ADJST_FCTR, "1"), DDint__(PARTITIONING_SCHEME_SHARING, "1"), // The optimal number of partition access nodes for a process. // NOTE: Setting this to anything other than 1 will cause problems // with Cascades plan stealing! Don't do it unless you have to! DDui1__(PARTITION_ACCESS_NODES_PER_ESP, "1"), DD_____(PCODE_DEBUG_LOGDIR, "" ), // Pathname of log directory for PCode work DDint__(PCODE_EXPR_CACHE_CMP_ONLY, "0" ), // PCode Expr Cache compare-only mode DDint__(PCODE_EXPR_CACHE_DEBUG, "0" ), // PCode Expr Cache debug (set to 1 to enable dbg logging) DDint__(PCODE_EXPR_CACHE_ENABLED, "0" ), // PCode Expr Cache Enabled (set to 0 to disable the cache) DD0_10485760(PCODE_EXPR_CACHE_SIZE,"2000000"), // PCode Expr Cache Max Size // Maximum number of PCODE Branch Instructions in an Expr // for which we will attempt PCODE optimizations. // NOTE: Default value reduced to 12000 for Trafodion to avoid stack // overflow in PCODE optimization where recursion is used. DDint__(PCODE_MAX_OPT_BRANCH_CNT, "12000"), // Maximum number of PCODE Instructions in an Expr // for which we will attempt PCODE optimizations. DDint__(PCODE_MAX_OPT_INST_CNT, "50000"), DDint__(PCODE_NE_DBG_LEVEL, "-1"), // Native Expression Debug Level DDint__(PCODE_NE_ENABLED, "1" ), // Native Expressions Enabled DDkwd__(PCODE_NE_IN_SHOWPLAN, "ON"), // Native Expression in Showplan output // This PCODE_NE_LOG_PATH cqd is now obsolete. Use PCODE_DEBUG_LOGDIR instead. // Would delete the following line except that would also mean deleting the // corresponding line in DefaultConstants.h which would change the values for // the following definitions in the same enum. DD_____(PCODE_NE_LOG_PATH, "" ), // Pathname of log file for Native Expression work - OBSOLETE DDint__(PCODE_OPT_FLAGS, "60"), DDkwd__(PCODE_OPT_LEVEL, "MAXIMUM"), DDint__(PHY_MEM_CONTINGENCY_MB, "3072"), DDkwd__(PLAN_STEALING, "ON"), DDui50_4194303(PM_OFFLINE_TRANSACTION_GRANULARITY, "5000"), DDui50_4194303(PM_ONLINE_TRANSACTION_GRANULARITY, "400"), // Not in use anymore. OVERRIDE_SYSKEY is used instead. 
DDkwd__(PM_REGENERATE_SYSKEY_FOR_INSERT, "ON"), // Partition OVerlay Support (POS) options SDDkwd__(POS, "DISK_POOL"), XDDpos__(POS_ABSOLUTE_MAX_TABLE_SIZE, ""), DDkwd__(POS_ALLOW_NON_PK_TABLES, "OFF"), DDui___(POS_CPUS_PER_SEGMENT, "16"), // default to 300 GB DDui___(POS_DEFAULT_LARGEST_DISK_SIZE_GB, "300"), // default to 72GB DDui___(POS_DEFAULT_SMALLEST_DISK_SIZE_GB, "72"), DD_____(POS_DISKS_IN_SEGMENT, ""), DD_____(POS_DISK_POOL, "0"), DD_____(POS_FILE_OPTIONS, ""), DD_____(POS_LOCATIONS, ""), DDkwd__(POS_MAP_HASH_TO_HASH2, "ON"), DDpos__(POS_MAX_EXTENTS, ""), SDDui___(POS_NUM_DISK_POOLS, "0"), DDui___(POS_NUM_OF_PARTNS, "SYSTEM"), SDDint__(POS_NUM_OF_TEMP_TABLE_PARTNS, "SYSTEM"), SDDpos__(POS_PRI_EXT_SIZE, "25"), DDkwd__(POS_RAISE_ERROR, "OFF"), SDDpos__(POS_SEC_EXT_SIZE, ""), SDDpos__(POS_TABLE_SIZE, ""), SDDpct__(POS_TEMP_TABLE_FREESPACE_THRESHOLD_PERCENT, "0"), DD_____(POS_TEMP_TABLE_LOCATIONS, ""), SDDpos__(POS_TEMP_TABLE_SIZE, ""), DDkwd__(POS_TEST_MODE, "OFF"), DDui___(POS_TEST_NUM_NODES, "0"), DDui___(POS_TEST_NUM_VOLUMES_PER_NODE, "0"), // Use info from right child to require order on left child of NJ //PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN made external RV 06/21/01 CR 10-010425-2440 DDkwd__(PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN, "OFF"), DD0_18(PRESERVE_MIN_SCALE, "0"), DDkwd__(PRIMARY_KEY_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(PSHOLD_CLOSE_ON_ROLLBACK, "OFF"), DDkwd__(PSHOLD_UPDATE_BEFORE_FETCH, "OFF"), SDDpsch_(PUBLIC_SCHEMA_NAME, ""), XDDrlis_(PUBLISHING_ROLES, ""), DDkwd__(PURGEDATA_WITH_OFFLINE_TABLE, "OFF"), // Query Invalidation - Debug/Regression test CQDs -- DO NOT externalize these DD_____(QI_PATH, "" ), // Specifies cat.sch.object path for object to have cache entries removed DD0_255(QI_PRIV, "0"), // Note: 0 disables the Debug Mechanism. Set non-zero to kick out cache entries. // Then set back to 0 *before* setting to a non-zero value again. // Do the query analysis phase DDkwd__(QUERY_ANALYSIS, "ON"), // query_cache max should be 200 MB. Set it 0 to turn off query cache //XDD0_200000(QUERY_CACHE, "0"), XDD0_200000(QUERY_CACHE, "16384"), // the initial average plan size (in kbytes) to use for configuring the // number of hash buckets to use for mxcmp's hash table of cached plans DD1_200000(QUERY_CACHE_AVERAGE_PLAN_SIZE, "30"), // literals longer than this are not parameterized DDui___(QUERY_CACHE_MAX_CHAR_LEN, "32000"), // a query with more than QUERY_CACHE_MAX_EXPRS ExprNodes is not cacheable DDint__(QUERY_CACHE_MAX_EXPRS, "1000"), // the largest number of cache entries that an unusually large cache // entry is allowed to displace from mxcmp's cache of query plans DD0_200000(QUERY_CACHE_MAX_VICTIMS, "10"), DD0_255(QUERY_CACHE_REQUIRED_PREFIX_KEYS, "255"), DDkwd__(QUERY_CACHE_RUNTIME, "ON"), SDDflt0_(QUERY_CACHE_SELECTIVITY_TOLERANCE, "0"), // query cache statement pinning is off by default DDkwd__(QUERY_CACHE_STATEMENT_PINNING, "OFF"), DDkwd__(QUERY_CACHE_STATISTICS, "OFF"), DD_____(QUERY_CACHE_STATISTICS_FILE, "qcachsts"), DDkwd__(QUERY_CACHE_TABLENAME, "OFF"), DDkwd__(QUERY_CACHE_USE_CONVDOIT_FOR_BACKPATCH, "ON"), // Limit CPU time a query can use in master or any ESP. Unit is seconds. XDDint__(QUERY_LIMIT_SQL_PROCESS_CPU, "0"), // Extra debugging info for QUERY_LIMIT feature. DDkwd__(QUERY_LIMIT_SQL_PROCESS_CPU_DEBUG, "OFF"), // How many iterations in scheduler subtask list before evaluating limits. DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_DP2_FREQ, "16"), // For X-prod HJ: (# of rows joined * LIMIT) before preempt. 
DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_XPROD, "10000"), // controls various expr optimizations based on bit flags. // see enum QueryOptimizationOptions in DefaultConstants.h DDint__(QUERY_OPTIMIZATION_OPTIONS, "3"), DDkwd__(QUERY_STRATEGIZER, "ON"), DDflt0_(QUERY_STRATEGIZER_2N_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_EXHAUSTIVE_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N2_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N3_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N4_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N_COMPLEXITY_FACTOR, "1"), DDkwd__(QUERY_TEMPLATE_CACHE, "ON"), DDkwd__(QUERY_TEXT_CACHE, "SYSTEM"), DDkwd__(R2_HALLOWEEN_SUPPORT, "OFF"), DDkwd__(RANGESPEC_TRANSFORMATION, "ON"), // RangeSpec Transformation CQD. // To be ANSI compliant you would have to set this default to 'FALSE' DDkwd__(READONLY_CURSOR, "TRUE"), DDflt0_(READ_AHEAD_MAX_BLOCKS, "16.0"), // OFF means Ansi/NIST setting, ON is more similar to the SQL/MP behavior DDkwd__(RECOMPILATION_WARNINGS, "OFF"), // CLI caller to redrive CTAS(create table as) for child query monitoring DDkwd__(REDRIVE_CTAS, "OFF"), // The group by reduction for pushing a partial group by past the // right side of the TSJ must be at least this much. If 0.0, then // pushing it will always be tried. DDflt0_(REDUCTION_TO_PUSH_GB_PAST_TSJ, "0.0000000001"), // This is the code base for the calibration machine. It must be either // "DEBUG" or "RELEASE" // History: // Before 02/01/99: DEBUG DDkwd__(REFERENCE_CODE, "RELEASE"), // This is the frequency of the representative CPU of the base calibration // cluster. // REFERENCE_CPU_FREQUENCY units are MhZ DDflte_(REFERENCE_CPU_FREQUENCY, "199."), // This is the seek time of the representative disk of the base // calibration cluster. // REFERENCE_IO_SEEK_TIME units are seconds DDflte_(REFERENCE_IO_SEEK_TIME, "0.0038"), // This is the sequential transfer rate for the representative // disk of the base calibration cluster. // REFERENCE_IO_SEQ_READ_RATE units are Mb/Sec DDflte_(REFERENCE_IO_SEQ_READ_RATE, "50.0"), // This is the transfer rate for the fast speed connection of // nodes in the base calibration cluster. // REFERENCE_MSG_LOCAL_RATE units are Mb/Sec DDflte_(REFERENCE_MSG_LOCAL_RATE, "10."), // This is the timeper local msg for the fast speed connection of // nodes in the base calibration cluster. // REFERENCE_MSG_LOCAL_TIME units are seconds DDflte_(REFERENCE_MSG_LOCAL_TIME, "0.000125"), // This is the transfer rate for the connection among clusters // in the base calibration cluster (this only applies to NSK) // REFERENCE_MSG_REMOTE_RATE units are Mb/Sec DDflte_(REFERENCE_MSG_REMOTE_RATE, "1."), // This is the time per remote msg for the fast speed connection of // nodes in the base calibration cluster. 
// REFERENCE_MSG_REMOTE_TIME units are seconds DDflte_(REFERENCE_MSG_REMOTE_TIME, "0.00125"), DDkwd__(REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT, "SYSTEM"), DDkwd__(REMOTE_ESP_ALLOCATION, "SYSTEM"), DDkwd__(REORG_IF_NEEDED, "OFF"), DDkwd__(REORG_VERIFY, "OFF"), DDrlis_(REPLICATE_ALLOW_ROLES, ""), // Determines the compression type to be used with DDL when replicating DDkwd__(REPLICATE_COMPRESSION_TYPE, "SYSTEM"), // Determines if DISK POOL setting should be passed with DDL when replicating DDkwd__(REPLICATE_DISK_POOL, "ON"), // Display a BDR-internally-generated command before executing it DDkwd__(REPLICATE_DISPLAY_INTERNAL_CMD, "OFF"), // Executing commands generated internally by BDR DDkwd__(REPLICATE_EXEC_INTERNAL_CMD, "OFF"), // VERSION of the message from the source system to maintain compatibility // This version should be same as REPL_IO_VERSION_CURR in executor/ExeReplInterface.h // Make changes accordingly in validataorReplIoVersion validator DDrver_(REPLICATE_IO_VERSION, "17"), DDansi_(REPLICATE_MANAGEABILITY_CATALOG, "MANAGEABILITY"), // max num of retries after replicate server(mxbdrdrc) returns an error DDui___(REPLICATE_NUM_RETRIES, "0"), DDansi_(REPLICATE_TEST_TARGET_CATALOG, ""), DDansi_(REPLICATE_TEST_TARGET_MANAGEABILITY_CATALOG, ""), DDkwd__(REPLICATE_WARNINGS, "OFF"), DDkwd__(RETURN_AVG_STREAM_WAIT, "OFF"), DDkwd__(REUSE_BASIC_COST, "ON"), // if set, tables are not closed at the end of a query. This allows // the same open to be reused for the next query which accesses that // table. // If the table is shared opened by multiple openers from the same // process, then the share count is decremented until it reaches 1. // At that time, the last open is preserved so it could be reused. // Tables are closed if user id changes. DDkwd__(REUSE_OPENS, "ON"), // multiplicative factor used to inflate cost of risky operators. // = 1.0 means do not demand an insurance premium from risky operators. // = 1.2 means demand a 20% insurance premium that cost of risky operators // must overcome before they will be chosen over less-risky operators. 
DDflt0_(RISK_PREMIUM_MJ, "1.15"), XDDflt0_(RISK_PREMIUM_NJ, "1.0"), XDDflt0_(RISK_PREMIUM_SERIAL, "1.0"), XDDui___(RISK_PREMIUM_SERIAL_SCALEBACK_MAXCARD_THRESHOLD, "10000"), DDflt0_(ROBUST_HJ_TO_NJ_FUDGE_FACTOR, "0.0"), DDflt0_(ROBUST_PAR_GRPBY_EXCHANGE_FCTR, "0.25"), DDflt0_(ROBUST_PAR_GRPBY_LEAF_FCTR, "0.25"), // external master CQD that sets following internal CQDs // robust_query_optimization // MINIMUM SYSTEM HIGH MAXIMUM // risk_premium_NJ 1.0 system 2.5 5.0 // risk_premium_SERIAL 1.0 system 1.5 2.0 // partitioning_scheme_sharing 0 system 2 2 // robust_hj_to_nj_fudge_factor 0.0 system 3.0 1.0 // robust_sortgroupby 0 system 2 2 // risk_premium_MJ 1.0 system 1.5 2.0 // see optimizer/ControlDB.cpp ControlDB::doRobustQueryOptimizationCQDs // for the actual cqds that set these values XDDkwd__(ROBUST_QUERY_OPTIMIZATION, "SYSTEM"), // 0: allow sort group by in all // 1: disallow sort group by from partial grpByRoot if no order requirement // 2: disallow sort group by from partial grpByRoot // 3: disallow sort group by in ESP DDint__(ROBUST_SORTGROUPBY, "1"), SDDui___(ROUNDING_MODE, "0"), DDui___(ROUTINE_CACHE_SIZE, "20"), // UDF default Uec DDui___(ROUTINE_DEFAULT_UEC, "1"), DDkwd__(ROUTINE_JOINS_SPOIL_JBB, "OFF"), DDkwd__(ROWSET_ROW_COUNT, "OFF"), DDint__(SAP_KEY_NJ_TABLE_SIZE_THRESHOLD, "10000000"), DDkwd__(SAP_PA_DP2_AFFINITY_FOR_INSERTS, "ON"), DDkwd__(SAP_PREFER_KEY_NESTED_JOIN, "OFF"), DDint__(SAP_TUPLELIST_SIZE_THRESHOLD, "5000"), XDDkwd__(SAVE_DROPPED_TABLE_DDL, "OFF"), XDDansi_(SCHEMA, "SEABASE"), //specify a : separated list of full path names where scratch files //should reside. Ensure each specified directoy exisst on each node and //Trafodion user has permissions to access them. DD_____(SCRATCH_DIRS, ""), DDkwd__(SCRATCH_DISK_LOGGING, "OFF"), SDDpct__(SCRATCH_FREESPACE_THRESHOLD_PERCENT, "1"), DDui___(SCRATCH_IO_BLOCKSIZE_SORT_MAX, "5242880"), //On LINUX, writev and readv calls are used to perform //scratch file IO. This CQD sets the vector size to use //in writev and readv calls. Overall IO size is affected //by this cqd. Also, related cqds that are related to //IO size are: COMP_INT_67, GEN_HGBY_BUFFER_SIZE. //GEN_HSHJ_BUFFER_SIZE, OLAP_BUFFER_SIZE, //EXE_HGB_INITIAL_HT_SIZE. Vector size is no-op on other //platforms. DDui___(SCRATCH_IO_VECTOR_SIZE_HASH, "8"), DDui___(SCRATCH_IO_VECTOR_SIZE_SORT, "1"), DDui___(SCRATCH_MAX_OPENS_HASH, "1"), DDui___(SCRATCH_MAX_OPENS_SORT, "1"), DDui___(SCRATCH_MGMT_OPTION, "11"), DDkwd__(SCRATCH_PREALLOCATE_EXTENTS, "OFF"), DD_____(SEABASE_CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(SEABASE_VOLATILE_TABLES, "ON"), // SeaMonster messaging -- the default can be ON, OFF, or SYSTEM. // When the default is SYSTEM we take the setting from env var // SQ_SEAMONSTER which will have a value of 0 or 1. 
DDkwd__(SEAMONSTER, "SYSTEM"), SDDkwd__(SEMIJOIN_TO_INNERJOIN_TRANSFORMATION, "SYSTEM"), // Disallow/Allow semi and anti-semi joins in MultiJoin framework DDkwd__(SEMI_JOINS_SPOIL_JBB, "OFF"), DDkwd__(SEQUENTIAL_BLOCKSPLIT, "SYSTEM"), DDansi_(SESSION_ID, ""), DDkwd__(SESSION_IN_USE, "OFF"), DDansi_(SESSION_USERNAME, ""), DDflt0_(SGB_CPUCOST_INITIALIZE, ".05"), DDui___(SGB_INITIAL_BUFFER_COUNT, "5."), DDui1__(SGB_INITIAL_BUFFER_SIZE, "5."), DDkwd__(SHAREOPENS_ON_REFCOUNT, "ON"), DDkwd__(SHARE_TEMPLATE_CACHED_PLANS, "ON"), DDui___(SHORT_OPTIMIZATION_PASS_THRESHOLD, "12"), SDDkwd__(SHOWCONTROL_SHOW_ALL, "OFF"), SDDkwd__(SHOWCONTROL_SHOW_SUPPORT, "OFF"), DDkwd__(SHOWDDL_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(SHOWDDL_DISPLAY_PRIVILEGE_GRANTS, "SYSTEM"), DDint__(SHOWDDL_FOR_REPLICATE, "0"), DDkwd__(SHOWLABEL_LOCKMODE, "OFF"), DDkwd__(SHOWWARN_OPT, "ON"), DDkwd__(SHOW_MEMO_STATS, "OFF"), DDkwd__(SIMPLE_COST_MODEL, "ON"), XDDkwd__(SKEW_EXPLAIN, "ON"), XDDflt__(SKEW_ROWCOUNT_THRESHOLD, "1000000"), // Column row count // threshold below // which skew // buster is disabled. XDDflt__(SKEW_SENSITIVITY_THRESHOLD, "0.1"), DDkwd__(SKIP_METADATA_VIEWS, "OFF"), DDkwd__(SKIP_TRANSLATE_SYSCAT_DEFSCH_NAMES, "ON"), DDkwd__(SKIP_UNAVAILABLE_PARTITION, "OFF"), DDkwd__(SKIP_VCC, "OFF"), DDui0_5(SOFT_REQ_HASH_TYPE, "2"), DDkwd__(SORT_ALGO, "QS"), // Calibration // 01/23/98: 10000 // Original: 10. DDflt0_(SORT_CPUCOST_INITIALIZE, "10000."), DDui1__(SORT_EX_BUFFER_SIZE, "5."), DDkwd__(SORT_INTERMEDIATE_SCRATCH_CLEANUP, "ON"), DDui1__(SORT_IO_BUFFER_SIZE, "128."), DD1_200000(SORT_MAX_HEAP_SIZE_MB, "800"), DDkwd__(SORT_MEMORY_QUOTA_SYSTEM, "ON"), DD1_128(SORT_MERGE_BUFFER_UNIT_56KB, "1"), // Calibration // 04/06/2005: 1.5 DDflte_(SORT_QS_FACTOR, "1.5"), //Maximum records after which sort would switch over to //iterative heap sort. Most often in partial sort, we may want //do a quick sort or similar to avoid larger in-memory sort //setup. DDint__(SORT_REC_THRESHOLD, "1000"), // Calibration DDflte_(SORT_RS_FACTOR, "3.55"), // Calibration // 04/06/2005: 2.1 DDflte_(SORT_RW_FACTOR, "2.1"), DDflte_(SORT_TREE_NODE_SIZE, ".012"), DDkwd__(SQLMX_REGRESS, "OFF"), DDkwd__(SQLMX_SHOWDDL_SUPPRESS_ROW_FORMAT, "OFF"), DDansi_(SQLMX_UTIL_EXPLAIN_PLAN, "OFF"), SDDkwd__(SQLMX_UTIL_ONLINE_POPINDEX, "ON"), SDDui___(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB, "1200"), // BertBert VV // Timeout for a streaming cursor to return to the fetch(), even if no // rows to return. The cursor is NOT closed, it just gives control to // the user again. // "0" means no timeout, just check instead. // "negative" means never timeout. // "positive" means the number of centiseconds to wait before timing out. XDDint__(STREAM_TIMEOUT, "-1"), XDDkwd__(SUBQUERY_UNNESTING, "ON"), DDkwd__(SUBQUERY_UNNESTING_P2, "ON"), DDkwd__(SUBSTRING_TRANSFORMATION, "OFF"), DDui___(SYNCDEPTH, "1"), XDDkwd__(TABLELOCK, "SYSTEM"), // This is the code base for the end user calibration cluster. // It must be either "DEBUG" or "RELEASE" #ifdef NDEBUG DDkwd__(TARGET_CODE, "RELEASE"), #else DDkwd__(TARGET_CODE, "DEBUG"), #endif // This is the frequency of the representative CPU of the end user // cluster. // TARGET_CPU_FREQUENCY units are MhZ. DDflte_(TARGET_CPU_FREQUENCY, "199."), // This is the seek time of the representative disk of the end user // cluster. // TARGET_IO_SEEK_TIME units are seconds DDflte_(TARGET_IO_SEEK_TIME, "0.0038"), // This is the sequential transfer rate for the representative // disk of the end user cluster. 
// TARGET_IO_SEQ_READ_RATE units are Mb/Sec DDflte_(TARGET_IO_SEQ_READ_RATE, "50.0"), // This is the transfer rate for the fast speed connection of // nodes in the end user cluster. // TARGET_MSG_LOCAL_RATE units are Mb/Sec DDflte_(TARGET_MSG_LOCAL_RATE, "10."), // This is the per msg time for the fast speed connection of // nodes in the end user cluster. // TARGET_MSG_LOCAL_TIME are seconds DDflte_(TARGET_MSG_LOCAL_TIME, "0.000125"), // This is the transfer rate for the connection among clusters // in the end user cluster (this only applies to NSK) // TARGET_MSG_REMOTE_RATE units are Mb/Sec DDflte_(TARGET_MSG_REMOTE_RATE, "1."), // This is the per msg time for the the connection among clusters // nodes in the end user cluster. // TARGET_MSG_REMOTE_TIME are seconds DDflte_(TARGET_MSG_REMOTE_TIME, "0.00125"), DD_____(TEMPORARY_TABLE_HASH_PARTITIONS, "" ), DDkwd__(TERMINAL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), DDint__(TEST_PASS_ONE_ASSERT_TASK_NUMBER, "-1"), DDint__(TEST_PASS_TWO_ASSERT_TASK_NUMBER, "-1"), XDDintN2(TIMEOUT, "6000"), DDflt0_(TMUDF_CARDINALITY_FACTOR, "1"), DDflt0_(TMUDF_LEAF_CARDINALITY, "1"), DDkwd__(TOTAL_RESOURCE_COSTING, "ON"), DDint__(TRAF_ALIGNED_FORMAT_ADD_COL_METHOD, "2"), DDkwd__(TRAF_ALIGNED_ROW_FORMAT, "ON"), DDkwd__(TRAF_ALLOW_ESP_COLOCATION, "OFF"), DDkwd__(TRAF_ALLOW_RESERVED_COLNAMES, "OFF"), DDkwd__(TRAF_ALLOW_SELF_REF_CONSTR, "ON"), DDkwd__(TRAF_ALTER_COL_ATTRS, "ON"), DDkwd__(TRAF_AUTO_CREATE_SCHEMA, "OFF"), DDkwd__(TRAF_BLOB_AS_VARCHAR, "ON"), //set to OFF to enable Lobs support DDkwd__(TRAF_BOOLEAN_IO, "OFF"), DDkwd__(TRAF_BOOTSTRAP_MD_MODE, "OFF"), DDkwd__(TRAF_CLOB_AS_VARCHAR, "ON"), //set to OFF to enable Lobs support DDkwd__(TRAF_COL_LENGTH_IS_CHAR, "ON"), DDkwd__(TRAF_CREATE_SIGNED_NUMERIC_LITERAL, "ON"), DDansi_(TRAF_CREATE_TABLE_WITH_UID, ""), DDkwd__(TRAF_CREATE_TINYINT_LITERAL, "ON"), DDkwd__(TRAF_DEFAULT_COL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), DDkwd__(TRAF_ENABLE_ORC_FORMAT, "OFF"), DDkwd__(TRAF_HBASE_MAPPED_TABLES, "ON"), DDkwd__(TRAF_HBASE_MAPPED_TABLES_IUD, "OFF"), DDkwd__(TRAF_INDEX_ALIGNED_ROW_FORMAT, "ON"), DDkwd__(TRAF_INDEX_CREATE_OPT, "OFF"), DDkwd__(TRAF_LARGEINT_UNSIGNED_IO, "OFF"), DDkwd__(TRAF_LOAD_ALLOW_RISKY_INDEX_MAINTENANCE, "OFF"), DDkwd__(TRAF_LOAD_CONTINUE_ON_ERROR, "OFF"), DD_____(TRAF_LOAD_ERROR_COUNT_ID, "" ), DD_____(TRAF_LOAD_ERROR_COUNT_TABLE, "ERRORCOUNTER" ), DD_____(TRAF_LOAD_ERROR_LOGGING_LOCATION, "/user/trafodion/bulkload/logs" ), DDint__(TRAF_LOAD_FLUSH_SIZE_IN_KB, "1024"), DDkwd__(TRAF_LOAD_FORCE_CIF, "ON"), DDkwd__(TRAF_LOAD_LOG_ERROR_ROWS, "OFF"), DDint__(TRAF_LOAD_MAX_ERROR_ROWS, "0"), DDint__(TRAF_LOAD_MAX_HFILE_SIZE, "10240"), // in MB -->10GB by default DDkwd__(TRAF_LOAD_PREP_ADJUST_PART_FUNC, "ON"), DDkwd__(TRAF_LOAD_PREP_CLEANUP, "ON"), DDkwd__(TRAF_LOAD_PREP_KEEP_HFILES, "OFF"), DDkwd__(TRAF_LOAD_PREP_PHASE_ONLY, "OFF"), DDkwd__(TRAF_LOAD_PREP_SKIP_DUPLICATES , "OFF"), //need add code to check if folder exists or not. if not issue an error and ask //user to create it DD_____(TRAF_LOAD_PREP_TMP_LOCATION, "/user/trafodion/bulkload/" ), DDkwd__(TRAF_LOAD_TAKE_SNAPSHOT , "OFF"), DDkwd__(TRAF_LOAD_USE_FOR_INDEXES, "ON"), DDkwd__(TRAF_LOAD_USE_FOR_STATS, "OFF"), // max size in bytes of a char or varchar column. 
Set to 16M DDui___(TRAF_MAX_CHARACTER_COL_LENGTH, MAX_CHAR_COL_LENGTH_IN_BYTES_STR), DDkwd__(TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE, "OFF"), DDkwd__(TRAF_MULTI_COL_FAM, "ON"), DDkwd__(TRAF_NO_CONSTR_VALIDATION, "OFF"), DDkwd__(TRAF_NO_DTM_XN, "OFF"), DDint__(TRAF_NUM_HBASE_VERSIONS, "0"), DDint__(TRAF_NUM_OF_SALT_PARTNS, "-1"), DDkwd__(TRAF_READ_OBJECT_DESC, "OFF"), DDkwd__(TRAF_RELOAD_NATABLE_CACHE, "OFF"), DD_____(TRAF_SAMPLE_TABLE_LOCATION, "/user/trafodion/sample/"), DDint__(TRAF_SEQUENCE_CACHE_SIZE, "-1"), DDkwd__(TRAF_SIMILARITY_CHECK, "ROOT"), DDkwd__(TRAF_STORE_OBJECT_DESC, "OFF"), DDkwd__(TRAF_STRING_AUTO_TRUNCATE, "OFF"), DDkwd__(TRAF_STRING_AUTO_TRUNCATE_WARNING, "OFF"), //TRAF_TABLE_SNAPSHOT_SCAN CQD can be set to : //NONE--> Snapshot scan is disabled and regular scan is used , //SUFFIX --> Snapshot scan enabled for the bulk unload (bulk unload // behavior id not changed) //LATEST --> enabled for the scan independently from bulk unload // the latest snapshot is used if it exists DDkwd__(TRAF_TABLE_SNAPSHOT_SCAN, "NONE"), DD_____(TRAF_TABLE_SNAPSHOT_SCAN_SNAP_SUFFIX, "SNAP"), //when the estimated table size is below the threshold (in MBs) //defined by TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD //regular scan instead of snapshot scan //does not apply to bulk unload which maintains the old behavior DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD, "1000"), //timeout before we give up when trying to create the snapshot scanner DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TIMEOUT, "6000"), //location for temporary links and files produced by snapshot scan DD_____(TRAF_TABLE_SNAPSHOT_SCAN_TMP_LOCATION, "/user/trafodion/bulkload/"), DDkwd__(TRAF_TINYINT_INPUT_PARAMS, "OFF"), DDkwd__(TRAF_TINYINT_RETURN_VALUES, "OFF"), DDkwd__(TRAF_TINYINT_SPJ_SUPPORT, "OFF"), DDkwd__(TRAF_TINYINT_SUPPORT, "ON"), // DTM Transaction Type: MVCC, SSCC XDDkwd__(TRAF_TRANS_TYPE, "MVCC"), DD_____(TRAF_UNLOAD_DEF_DELIMITER, "|" ), DD_____(TRAF_UNLOAD_DEF_RECORD_SEPARATOR, "\n" ), DDint__(TRAF_UNLOAD_HDFS_COMPRESS, "0"), DDkwd__(TRAF_UPSERT_ADJUST_PARAMS, "OFF"), DDkwd__(TRAF_UPSERT_MODE, "MERGE"), DDkwd__(TRAF_UPSERT_TO_EFF_TREE, "ON"), DDint__(TRAF_UPSERT_WB_SIZE, "2097152"), DDkwd__(TRAF_UPSERT_WRITE_TO_WAL, "OFF"), DDkwd__(TRAF_USE_REGION_XN, "OFF"), DDkwd__(TRAF_USE_RWRS_FOR_MD_INSERT, "ON"), DDkwd__(TRANSLATE_ERROR, "ON"), DDkwd__(TRANSLATE_ERROR_UNICODE_TO_UNICODE, "ON"), DDkwd__(TRY_DP2_REPARTITION_ALWAYS, "OFF"), SDDkwd__(TRY_PASS_ONE_IF_PASS_TWO_FAILS, "OFF"), // Disallow/Allow TSJs in MultiJoin framework DDkwd__(TSJS_SPOIL_JBB, "OFF"), // type a CASE expression or ValueIdUnion as varchar if its leaves // are of type CHAR of unequal length DDkwd__(TYPE_UNIONED_CHAR_AS_VARCHAR, "ON"), // UDF scalar indicating maximum number of rows out for each row in. DDui___(UDF_FANOUT, "1"), // Must be in form <cat>.<sch>. Delimited catalog names not allowed. DD_____(UDF_METADATA_SCHEMA, "TRAFODION.\"_UDF_\""), DDkwd__(UDF_SUBQ_IN_AGGS_AND_GBYS, "SYSTEM"), XDDui___(UDR_DEBUG_FLAGS, "0"), // see sqludr/sqludr.h for values SDD_____(UDR_JAVA_OPTIONS, "OFF"), DD_____(UDR_JAVA_OPTION_DELIMITERS, " "), XDDui___(UDR_JVM_DEBUG_PORT, "0"), XDDui___(UDR_JVM_DEBUG_TIMEOUT, "0"), DDkwd__(UNAVAILABLE_PARTITION, "STOP"), // "?" used? 
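  // Illustration (example numbers only) of the TRAF_TABLE_SNAPSHOT_SCAN size
  // threshold described above: with TRAF_TABLE_SNAPSHOT_SCAN set to 'LATEST'
  // and TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD at its default of 1000
  // (MB), a table estimated at 400 MB is read with a regular scan, while a
  // table estimated at 4000 MB is read from its latest snapshot if one exists.
  // Bulk unload keeps its old behavior regardless of this threshold.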
DDkwd__(UNC_PROCESS, "OFF"), SDDkwd__(UNIQUE_HASH_JOINS, "SYSTEM"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE, "1000"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE_PER_INSTANCE, "100"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_TABLES, "2"), DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_DOWN, "31000"), DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_UP, "31000"), DDui1__(UPDATED_BYTES_PER_ESP, "400000"), DDkwd__(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY,"ON"), DDkwd__(UPD_ABORT_ON_ERROR, "OFF"), XDDkwd__(UPD_ORDERED, "ON"), DDkwd__(UPD_PARTIAL_ON_ERROR, "OFF"), DDkwd__(UPD_SAVEPOINT_ON_ERROR, "ON"), DDkwd__(USER_EXPERIENCE_LEVEL, "BEGINNER"), // ------------------------------------------------------------------------ // This default will use a new type of an ASSERT, CCMPASSERT as a CMPASSERT // when ON, else use that as a DCMPASSERT. Changed this default to OFF // just before the final build for R2 07/23/2004 RV // ------------------------------------------------------------------------- DDkwd__(USE_CCMPASSERT_AS_CMPASSERT, "OFF"), DDkwd__(USE_DENSE_BUFFERS, "ON"), // Use Hive tables as source for traf ustat and popindex DDkwd__(USE_HIVE_SOURCE, ""), // Use large queues on RHS of Flow/Nested Join when appropriate DDkwd__(USE_LARGE_QUEUES, "ON"), DDkwd__(USE_MAINTAIN_CONTROL_TABLE, "OFF"), DDkwd__(USE_OLD_DT_CONSTRUCTOR, "OFF"), // Adaptive segmentation, use operator max to determine degree of parallelism DDui___(USE_OPERATOR_MAX_FOR_DOP, "1"), // Specify the number of partitions before invoking parallel label operations DDui1__(USE_PARALLEL_FOR_NUM_PARTITIONS, "32"), DDkwd__(USTAT_ADD_SALTED_KEY_PREFIXES_FOR_MC, "ON"), // When ON, generate MCs for primary key prefixes as well as full key // of salted table when ON EVERY KEY or ON EVERY COLUMN is specified. DDkwd__(USTAT_ATTEMPT_ESP_PARALLELISM, "ON"), // for reading column values DDui___(USTAT_AUTOMATION_INTERVAL, "0"), XDDflt0_(USTAT_AUTO_CV_SAMPLE_SLOPE, "0.5"), // CV multiplier for sampling %. DDkwd__(USTAT_AUTO_EMPTYHIST_TWO_TRANS, "OFF"), // When ON empty hist insert will be 2 trans. DDkwd__(USTAT_AUTO_FOR_VOLATILE_TABLES, "OFF"), // Toggle for vol tbl histogram usage DDui___(USTAT_AUTO_MAX_HIST_AGE, "0"), // Age of oldest unused histogram - only applies when automation is on. DDui1__(USTAT_AUTO_MC_MAX_WIDTH, "10"), // The max columns in an MC histogram for automation. DDui___(USTAT_AUTO_MISSING_STATS_LEVEL, "4"), // Similar to HIST_MISSING_STATS_WARNING_LEVEL, but controls // if automation inserts missing stats to HISTOGRAMS table. // 0 - insert no stats, // 1 - insert single col hists, // 2 - insert all single col hists and MC hists for scans, // 3 - insert all single col hists and MC stats for scans and joins. // 4 - insert all single col hists and MC stats for scans, joins, and groupbys. XDDui___(USTAT_AUTO_PRIORITY, "150"), // Priority of ustats under USAS. DDui1__(USTAT_AUTO_READTIME_UPDATE_INTERVAL, "86400"), // Seconds between updates of READ_COUNT. // Should be > CACHE_HISTOGRAMS_REFRESH_INTERVAL. 
DDkwd__(USTAT_CHECK_HIST_ACCURACY, "OFF"), DDui1__(USTAT_CLUSTER_SAMPLE_BLOCKS, "1"), DDkwd__(USTAT_COLLECT_FILE_STATS, "ON"), // do we collect file stats DDkwd__(USTAT_COLLECT_MC_SKEW_VALUES, "OFF"), DDkwd__(USTAT_COMPACT_VARCHARS, "OFF"), // If on, internal sort does not pad out varchars DD_____(USTAT_CQDS_ALLOWED_FOR_SPAWNED_COMPILERS, ""), // list of CQDs that can be pushed to seconday compilers // CQDs are delimited by "," DD_____(USTAT_DEBUG_TEST, ""), DDkwd__(USTAT_DELETE_NO_ROLLBACK, "ON"), // If ON, use DELETE WITH NO ROLLBACK in IUS when updating sample table DDflte_(USTAT_DSHMAX, "50.0"), DDkwd__(USTAT_ESTIMATE_HBASE_ROW_COUNT, "ON"), DDkwd__(USTAT_FETCHCOUNT_ACTIVE, "OFF"), DDkwd__(USTAT_FORCE_MOM_ESTIMATOR, "OFF"), DDkwd__(USTAT_FORCE_TEMP, "OFF"), DDflt0_(USTAT_FREQ_SIZE_PERCENT, "0.5"), // >100 effectively disables DDflt0_(USTAT_GAP_PERCENT, "10.0"), DDflt0_(USTAT_GAP_SIZE_MULTIPLIER, "1.5"), DDui___(USTAT_HBASE_SAMPLE_RETURN_INTERVAL, "10000000"), // Avoid scanner timeout by including on average at // least one row per this many when sampling within HBase. DDflt0_(USTAT_INCREMENTAL_FALSE_PROBABILITY, "0.01"), DDkwd__(USTAT_INCREMENTAL_UPDATE_STATISTICS, "ON"), DDkwd__(USTAT_INSERT_TO_NONAUDITED_TABLE, "OFF"), // Used internally to overcome problem in which insert // to the non-audited sample table must be done on same // process it was created on. This CQD is NOT externalized. DDkwd__(USTAT_INTERNAL_SORT, "HYBRID"), DDkwd__(USTAT_IS_IGNORE_UEC_FOR_MC, "OFF"), // if MCIS is ON, use IS to compute SC stats DDflt_0_1(USTAT_IS_MEMORY_FRACTION, "0.6"), DDflt0_(USTAT_IUS_INTERVAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_INTERVAL_UEC_CHANGE_THRESHOLD, "0.05"), DDui1_6(USTAT_IUS_MAX_NUM_HASH_FUNCS, "5"), // the max disk space IUS CBFs can use is // MINOF(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, // TtotalSpace * USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE) DDui___(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, "50000"), // 50GB DDflt0_(USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE, "0.20"), // 20% of the total DDui1_6(USTAT_IUS_MAX_TRANSACTION_DURATION, "5"), // in minutes DDkwd__(USTAT_IUS_NO_BLOCK, "OFF"), DDansi_(USTAT_IUS_PERSISTENT_CBF_PATH, "SYSTEM"), DDflt0_(USTAT_IUS_TOTAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_TOTAL_UEC_CHANGE_THRESHOLD, "0.05"), DDkwd__(USTAT_IUS_USE_PERIODIC_SAMPLING, "OFF"), DDkwd__(USTAT_JIT_LOGGING, "OFF"), DDkwd__(USTAT_LOCK_HIST_TABLES, "OFF"), DD_____(USTAT_LOG, "ULOG"), DDui30_246(USTAT_MAX_CHAR_BOUNDARY_LEN, "30"), // Values can be 30-246. DDui___(USTAT_MAX_CHAR_COL_LENGTH_IN_BYTES, "256"), // When computing UECs, char cols are limited to this many bytes DDflt0_ (USTAT_MAX_CHAR_DATASIZE_FOR_IS, "1000"), // max data size in MB for char type to use XDDui___(USTAT_MAX_READ_AGE_IN_MIN, "5760"), DDui___(USTAT_MAX_SAMPLE_AGE, "365"), // For R2.5 set to a year so user created samples won't be removed. // internal sort without checking UEC. DDflt0_(USTAT_MIN_CHAR_UEC_FOR_IS, "0.2"), // minimum UEC for char type to use internal sort DDflt0_(USTAT_MIN_DEC_BIN_UEC_FOR_IS, "0.0"), // minimum UEC for binary types to use internal sort DDflt0_(USTAT_MIN_ESTIMATE_FOR_ROWCOUNT, "10000000"), DDui1__(USTAT_MIN_ROWCOUNT_FOR_CTS_SAMPLE, "10000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_LOW_SAMPLE, "1000000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_SAMPLE, "10000"), DDflt0_(USTAT_MODIFY_DEFAULT_UEC, "0.05"), DDflt0_(USTAT_NAHEAP_ESTIMATED_MAX, "1.3"), // estimated max memory allocation (in GB) feasible with NAHEAP. 
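  // Worked example of the IUS CBF disk-space cap described above, using the
  // default settings: with USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB = 50000 and
  // USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE = 0.20, a volume with
  // 100000 MB of total space allows MINOF(50000, 0.20 * 100000) = 20000 MB
  // for the CBFs, while a volume with 1000000 MB of total space allows
  // MINOF(50000, 0.20 * 1000000) = 50000 MB.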
XDDui1__(USTAT_NECESSARY_SAMPLE_MAX, "5000000"), // Maximum sample size with NECESSARY DDui1__(USTAT_NUM_MC_GROUPS_FOR_KEYS, "10"), XDDpct__(USTAT_OBSOLETE_PERCENT_ROWCOUNT, "15"), DDkwd__(USTAT_PROCESS_GAPS, "ON"), DD0_255(USTAT_RETRY_DELAY, "100"), DD0_255(USTAT_RETRY_LIMIT, "3"), DD0_255(USTAT_RETRY_NEC_COLS_LIMIT, "3"), // by default, use retry for AddNecessaryColumns DDui1__(USTAT_RETRY_SECURITY_COUNT, "120"), DDpct__(USTAT_SAMPLE_PERCENT_DIFF, "10"), DDansi_(USTAT_SAMPLE_TABLE_NAME, " "), DDansi_(USTAT_SAMPLE_TABLE_NAME_CREATE, " "), DDkwd__(USTAT_SHOW_MC_INTERVAL_INFO, "OFF"), DDkwd__(USTAT_SHOW_MFV_INFO, "OFF"), DDflte_(USTAT_UEC_HI_RATIO, "0.5"), DDflte_(USTAT_UEC_LOW_RATIO, "0.1"), DDkwd__(USTAT_USE_BACKING_SAMPLE, "OFF"), DDkwd__(USTAT_USE_BULK_LOAD, "OFF"), DDkwd__(USTAT_USE_GROUPING_FOR_SAMPLING, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_LOOP, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_NEW_HIST, "OFF"), // TEMP FOR TESTING -- SHOULD REMOVE DDkwd__(USTAT_USE_IS_WHEN_NO_STATS, "ON"), // use IS when no histograms exist for the column DDkwd__(USTAT_USE_SIDETREE_INSERT, "ON"), DDkwd__(USTAT_USE_SLIDING_SAMPLE_RATIO, "ON"), // Trend sampling rate down w/increasing table size, going // flat at 1%. XDDflt1_(USTAT_YOULL_LIKELY_BE_SORRY, "100000000"), // guard against unintentional long-running UPDATE STATS DDkwd__(VALIDATE_VIEWS_AT_OPEN_TIME, "OFF"), //this is the default length of a param which is typed as a VARCHAR. DD1_4096(VARCHAR_PARAM_DEFAULT_SIZE, "255"), // allows pcodes for varchars DDkwd__(VARCHAR_PCODE, "ON"), DDansi_(VOLATILE_CATALOG, ""), DDkwd__(VOLATILE_SCHEMA_IN_USE, "OFF"), // if this is set to ON or SYSTEM, then find a suitable key among all the // columns of a volatile table. // If this is set to OFF, and there is no user specified primary key or // store by clause, then make the first column of the volatile table // to be the clustering key. DDkwd__(VOLATILE_TABLE_FIND_SUITABLE_KEY, "SYSTEM"), // if this is set, and there is no user specified primary key or // store by clause, then make the first column of the volatile table // to be the clustering key. // Default is ON. DDkwd__(VOLATILE_TABLE_FIRST_COL_IS_CLUSTERING_KEY, "ON"), DDkwd__(VSBB_TEST_MODE, "OFF"), XDDkwd__(WMS_CHILD_QUERY_MONITORING, "OFF"), XDDkwd__(WMS_QUERY_MONITORING, "OFF"), // amount of work we are willing to assign per CPU for any query // not running at full system parallelism SDDflte_(WORK_UNIT_ESP, "0.08"), SDDflte_(WORK_UNIT_ESP_DATA_COPY_COST, "0.001"), // ZIG_ZAG_TREES ON means do ZIG_ZAG_TREES // $$$ OFF for beta DDkwd__(ZIG_ZAG_TREES, "SYSTEM"), DDkwd__(ZIG_ZAG_TREES_CONTROL, "OFF") }; // // NOTE: The defDefIx_ array is an array of integers that map // 'enum' values to defaultDefaults[] entries. // The defDefIx_ array could probably be made global static // since all threads should map the same 'enum' values to the // same defaultDefaults[] entries. Such as change is being // left to a future round of optimizations. 
// static THREAD_P size_t defDefIx_[__NUM_DEFAULT_ATTRIBUTES]; inline static const char *getAttrName(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].attrName; } inline static const char *getDefaultDefaultValue(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].value; } inline static const DefaultValidator *validator(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].validator; } inline static UInt32 getFlags(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].flags; } inline static NABoolean isFlagOn(Int32 attrEnum, NADefaultFlags flagbit) { #pragma nowarn(1506) // warning elimination return defaultDefaults[defDefIx_[attrEnum]].flags & (UInt32)flagbit; #pragma warn(1506) // warning elimination } inline static void setFlagOn(Int32 attrEnum, NADefaultFlags flagbit) { defaultDefaults[defDefIx_[attrEnum]].flags |= (UInt32)flagbit; } static NABoolean isSynonymOfRESET(NAString &value) { return (value == "RESET"); } static NABoolean isSynonymOfSYSTEM(Int32 attrEnum, NAString &value) { if (value == "") return TRUE; if (value == "SYSTEM") return !isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM); if (value == "ENABLE"){ value = "ON"; return FALSE; } else if (value == "DISABLE"){ value = "OFF"; return FALSE; } // if (getDefaultDefaultValue(attrEnum) != NAString("DISABLE")) // cast reqd!! // return TRUE; // else // value = "ON"; return FALSE; } // Helper class used for holding and restoring CQDs class NADefaults::HeldDefaults { public: HeldDefaults(void); ~HeldDefaults(void); // CMPASSERT's on stack overflow void pushDefault(const char * value); // returns null if nothing to pop char * popDefault(void); private: enum { STACK_SIZE = 3 }; int stackPointer_; char * stackValue_[STACK_SIZE]; }; // Methods for helper class HeldDefaults NADefaults::HeldDefaults::HeldDefaults(void) : stackPointer_(0) { for (int i = 0; i < STACK_SIZE; i++) stackValue_[i] = NULL; } NADefaults::HeldDefaults::~HeldDefaults(void) { for (int i = 0; i < STACK_SIZE; i++) { if (stackValue_[i]) { NADELETEBASIC(stackValue_[i], NADHEAP); } } } // CMPASSERT's on stack overflow void NADefaults::HeldDefaults::pushDefault(const char * value) { CMPASSERT(stackPointer_ < STACK_SIZE); stackValue_[stackPointer_] = new NADHEAP char[strlen(value) + 1]; strcpy(stackValue_[stackPointer_],value); stackPointer_++; } // returns null if nothing to pop char * NADefaults::HeldDefaults::popDefault(void) { char * result = 0; if (stackPointer_ > 0) { stackPointer_--; result = stackValue_[stackPointer_]; stackValue_[stackPointer_] = NULL; } return result; } size_t NADefaults::numDefaultAttributes() { return (size_t)__NUM_DEFAULT_ATTRIBUTES; } // Returns current defaults in alphabetic order (for SHOWCONTROL listing). const char *NADefaults::getCurrentDefaultsAttrNameAndValue( size_t ix, const char* &name, const char* &value, NABoolean userDefaultsOnly) { if (ix < numDefaultAttributes()) { NABoolean get = FALSE; if (userDefaultsOnly) { // if this default was entered by user, return it. 
get = userDefault(defaultDefaults[ix].attrEnum); } else { // display the control if // - it is externalized or // - it is for support only and a CQD is set to show those, or // - a CQD is set to show all the controls get = (defaultDefaults[ix].flags & DEFAULT_IS_EXTERNALIZED) || // bit-AND ((defaultDefaults[ix].flags & DEFAULT_IS_FOR_SUPPORT) && (getToken(SHOWCONTROL_SHOW_SUPPORT) == DF_ON)) || (getToken(SHOWCONTROL_SHOW_ALL) == DF_ON); } if (get) { name = defaultDefaults[ix].attrName; value = currentDefaults_[defaultDefaults[ix].attrEnum]; return name; } } return name = value = NULL; } // ----------------------------------------------------------------------- // convert the default defaults into a table organized by enum values // ----------------------------------------------------------------------- void NADefaults::initCurrentDefaultsWithDefaultDefaults() { deleteMe(); const size_t numAttrs = numDefaultAttributes(); if (numAttrs != sizeof(defaultDefaults) / sizeof(DefaultDefault)) return; CMPASSERT_STRING (numAttrs == sizeof(defaultDefaults) / sizeof(DefaultDefault), "Check sqlcomp/DefaultConstants.h for a gap in enum DefaultConstants or sqlcomp/nadefaults.cpp for duplicate entries in array defaultDefaults[]."); SqlParser_NADefaults_Glob = SqlParser_NADefaults_ = new NADHEAP SqlParser_NADefaults(); provenances_ = new NADHEAP char [numAttrs]; // enum fits in 2 bits flags_ = new NADHEAP char [numAttrs]; resetToDefaults_ = new NADHEAP char * [numAttrs]; currentDefaults_ = new NADHEAP const char * [numAttrs]; currentFloats_ = new NADHEAP float * [numAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numAttrs]; currentState_ = INIT_DEFAULT_DEFAULTS; heldDefaults_ = new NADHEAP HeldDefaults * [numAttrs]; // reset all entries size_t i = 0; for (i = 0; i < numAttrs; i++) { provenances_[i] = currentState_; flags_[i] = 0; defDefIx_[i] = 0; } memset( resetToDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentFloats_, 0, sizeof(float *) * numAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numAttrs ); memset( heldDefaults_, 0, sizeof(HeldDefaults *) * numAttrs ); #ifndef NDEBUG // This env-var turns on consistency checking of default-defaults and // other static info. The env-var does not get passed from sqlci to arkdev // until *AFTER* the initialization code runs, so you must do a static // arkcmp compile to do this checking. TEST050 does this, in fact. 
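// Usage note (an assumption about the test setup, cf. TEST050): only the
// presence of the env-var matters -- its value is not parsed below -- so
// having NADEFAULTS_VALIDATE set to anything in the environment of a
// debug-built static arkcmp compile is enough to enable these checks.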
NABoolean nadval = !!getenv("NADEFAULTS_VALIDATE"); #endif // for each entry of the (alphabetically sorted) default defaults // table, enter the default default into the current default table // which is sorted by enum values NAString prevAttrName; for (i = 0; i < numAttrs; i++) { // the enum must be less than the max (if this assert fails // you might have made the range of constants in the enum // non-contiguous by assigning hard-coded numbers to some entries) CMPASSERT(ENUM_RANGE_CHECK(defaultDefaults[i].attrEnum)); // can't have the same enum value twice in defaultDefaults CMPASSERT(currentDefaults_[defaultDefaults[i].attrEnum] == NULL); // set currentDefaults_[enum] to the static string, // leaving the "allocated from heap" flag as FALSE char * value = new NADHEAP char[strlen(defaultDefaults[i].value) + 1]; strcpy(value,defaultDefaults[i].value); // trim trailing spaces (except UDR_JAVA_OPTION_DELIMITERS, since // trailing space is allowed for it) if (defaultDefaults[i].attrEnum != UDR_JAVA_OPTION_DELIMITERS) { Lng32 len = strlen(value); while ((len > 0) && (value[len-1] == ' ')) { value[len-1] = 0; len--; } } currentDefaults_[defaultDefaults[i].attrEnum] = value; // set up our backlink which maps [enum] to its defaultDefaults entry defDefIx_[defaultDefaults[i].attrEnum] = i; // attrs must be in ascending sorted order. If not, error out. if (prevAttrName > defaultDefaults[i].attrName) { SqlParser_NADefaults_ = NULL; return; } prevAttrName = defaultDefaults[i].attrName; // validate initial default default values CMPASSERT(defaultDefaults[i].validator); if (! defaultDefaults[i].validator->validate( defaultDefaults[i].value, this, defaultDefaults[i].attrEnum, +1/*warning*/)) { SqlParser_NADefaults_ = NULL; cerr << "\nERROR: " << defaultDefaults[i].attrName << " has invalid value" << defaultDefaults[i].value << endl; return; } // LCOV_EXCL_START // for debugging only #ifndef NDEBUG if (nadval) { // additional sanity checking we want to do occasionally NAString v; // ensure the static table really is in alphabetic order CMPASSERT(i == 0 || strcmp(defaultDefaults[i-1].attrName, defaultDefaults[i].attrName) < 0); // ensure these names are fit and trim and in canonical form v = defaultDefaults[i].attrName; TrimNAStringSpace(v); v.toUpper(); CMPASSERT(v == defaultDefaults[i].attrName); // validate initial default default values CMPASSERT(defaultDefaults[i].validator); defaultDefaults[i].validator->validate( defaultDefaults[i].value, this, defaultDefaults[i].attrEnum, +1/*warning*/); // ensure these values are fit and trim and in canonical form v = defaultDefaults[i].value; TrimNAStringSpace(v); defaultDefaults[i].validator->applyUpper(v); CMPASSERT(v == defaultDefaults[i].value); // alert the programmer if (isSynonymOfSYSTEM(defaultDefaults[i].attrEnum, v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has SYSTEM default (" << v << ");\n\t read NOTE 2 in " << __FILE__ << endl; if (isSynonymOfRESET(v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has RESET default (" << v << ");\n\t this makes no sense!" 
<< endl; if (defaultDefaults[i].validator == &validateUnknown) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has a NO-OP validator" << endl; // the token keyword array must have no missing strings, // it must also be in alphabetic order, // each entry must be canonical, and // must have no embedded spaces (see token() method, space/uscore...) if (i == 0) for (size_t j = 0; j < DF_lastToken; j++) { CMPASSERT(keywords_[j]); CMPASSERT(j == 0 || strcmp(keywords_[j-1], keywords_[j]) < 0); NAString v(keywords_[j]); TrimNAStringSpace(v); v.toUpper(); // we know keywords must be caseINsens CMPASSERT(v == keywords_[j]); CMPASSERT(v.first(' ') == NA_NPOS); } } // if env-var #endif // NDEBUG // LCOV_EXCL_STOP } // for i // set the default value for GENERATE_EXPLAIN depending on whether // this is a static compile or a dynamic compile. if (CmpCommon::context()->GetMode() == STMT_STATIC) { currentDefaults_[GENERATE_EXPLAIN] = "ON"; currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "ON"; } else { currentDefaults_[GENERATE_EXPLAIN] = "OFF"; currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "OFF"; currentDefaults_[DETAILED_STATISTICS] = "OPERATOR"; } // set the default value of hive_catalog to the hive_system_catalog currentDefaults_[HIVE_CATALOG] = HIVE_SYSTEM_CATALOG; // set the default value of hbase_catalog to the hbase_system_catalog currentDefaults_[HBASE_CATALOG] = HBASE_SYSTEM_CATALOG; currentDefaults_[SEABASE_CATALOG] = TRAFODION_SYSCAT_LIT; // Test for TM_USE_SSCC from ms.env. // Only a setting of TM_USE_SSCC set to 1 will change the value to SSCC. // Otherwise, the default will remain at MVCC. char * ev = getenv("TM_USE_SSCC"); Lng32 useValue = 0; if (ev) { useValue = (Lng32)str_atoi(ev, str_len(ev)); if (useValue == 1) currentDefaults_[TRAF_TRANS_TYPE] = "SSCC"; } // Begin: Temporary workaround for SQL build regressions to pass NABoolean resetNeoDefaults = FALSE; // On SQ, the way to get an envvar from inside a un-attached process // is to use the msg_getenv_str() call and set the env inside // the SQ_PROP_ property file. In this case the property // file is $TRAF_HOME/etc/SQ_PROP_tdm_arkcmp which contains the line // "SQLMX_REGRESS=1". This file was generated by tools/setuplnxenv. // resetNeoDefaults = (msg_getenv_str("SQLMX_REGRESS") != NULL); resetNeoDefaults = (getenv("SQLMX_REGRESS") != NULL); if(resetNeoDefaults) { // turn on ALL stats during regressions run. currentDefaults_[COMP_BOOL_157] = "ON"; // turn on INTERNAL format for SHOWDDL statements currentDefaults_[SHOWDDL_DISPLAY_FORMAT] = "INTERNAL"; } // End: Temporary workaround for SQL build regressions to pass // Cache all the default keywords up front, // leaving other non-keyword token to be cached on demand. // The "keyword" that is not cached is the kludge/clever trick that // Matt puts in for NATIONAL_CHARSET. NAString tmp( NADHEAP ); for ( i = 0; i < numAttrs; i++ ) { #ifndef NDEBUG #pragma nowarn(1506) // warning elimination const DefaultValidatorType validatorType = validator(i)->getType(); #pragma warn(1506) // warning elimination #endif #pragma nowarn(1506) // warning elimination if ( validator(i)->getType() == VALID_KWD && (i != NATIONAL_CHARSET) && (i != INPUT_CHARSET) && (i != ISO_MAPPING) ) #pragma warn(1506) // warning elimination { currentTokens_[i] = new NADHEAP DefaultToken; // do not call 'token' method as it will return an error if FALSE // is to be inserted. Just directly assign DF_OFF to non-resetable defs. 
if (isNonResetableAttribute(defaultDefaults[defDefIx_[i]].attrName)) *currentTokens_[i] = DF_OFF; else #pragma nowarn(1506) // warning elimination *currentTokens_[i] = token( i, tmp ); #pragma warn(1506) // warning elimination } } if (getToken(MODE_SEABASE) == DF_ON) { currentDefaults_[CATALOG] = TRAFODION_SYSCAT_LIT; if (getToken(SEABASE_VOLATILE_TABLES) == DF_ON) { NAString sbCat = getValue(SEABASE_CATALOG); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); } } SqlParser_NADefaults_->NAMETYPE_ = getToken(NAMETYPE); SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[NATIONAL_CHARSET]); SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(currentDefaults_[ISO_MAPPING]); SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } NADefaults::NADefaults(NAMemory * h) : provenances_(NULL) , flags_(NULL) , resetToDefaults_(NULL) , currentDefaults_(NULL) , currentFloats_(NULL) , currentTokens_(NULL) , heldDefaults_(NULL) , currentState_(UNINITIALIZED) , readFromSQDefaultsTable_(FALSE) , SqlParser_NADefaults_(NULL) , catSchSetToUserID_(NULL) , heap_(h) , resetAll_(FALSE) , defFlags_(0) , tablesRead_(h) { static THREAD_P NABoolean systemParamterUpdated = FALSE; // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, if (!systemParamterUpdated && !cmpCurrentContext->isStandalone()) { updateSystemParameters(); systemParamterUpdated = TRUE; } // then copy DefaultDefaults into CurrentDefaults. initCurrentDefaultsWithDefaultDefaults(); // Set additional defaultDefaults flags: // If an attr allows ON/OFF/SYSTEM and the default-default is not SYSTEM, // then you must set this flag. Otherwise, CQD attr 'system' will revert // the value back to the default-default, which is not SYSTEM. // setFlagOn(...attr..., DEFAULT_ALLOWS_SEPARATE_SYSTEM); // // (See attESPPara in OptPhysRelExpr.cpp.) 
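//
// Put differently (cf. isSynonymOfSYSTEM() above): when this flag is on, a
// "CQD attr 'SYSTEM'" is stored literally instead of being folded back into
// the default-default by validateAndInsert(); the attributes flagged just
// below are the ones that need that separate-SYSTEM behavior.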
setFlagOn(ATTEMPT_ESP_PARALLELISM, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(HJ_TYPE, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(ZIG_ZAG_TREES, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT_BMO, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(HBASE_SMALL_SCANNER, DEFAULT_ALLOWS_SEPARATE_SYSTEM); } NADefaults::~NADefaults() { deleteMe(); } void NADefaults::deleteMe() { if (resetToDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(resetToDefaults_[i], NADHEAP); NADELETEBASIC(resetToDefaults_, NADHEAP); } if (currentDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) if (provenances_[i] > INIT_DEFAULT_DEFAULTS) NADELETEBASIC(currentDefaults_[i], NADHEAP); NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } if (heldDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETE(heldDefaults_[i], HeldDefaults, NADHEAP); NADELETEBASIC(heldDefaults_, NADHEAP); } for (CollIndex i = tablesRead_.entries(); i--; ) tablesRead_.removeAt(i); NADELETEBASIC(provenances_, NADHEAP); NADELETEBASIC(flags_, NADHEAP); NADELETE(SqlParser_NADefaults_, SqlParser_NADefaults, NADHEAP); } // ----------------------------------------------------------------------- // Find the attribute name from its enum value in the defaults table. // ----------------------------------------------------------------------- const char *NADefaults::lookupAttrName(Int32 attrEnum, Int32 errOrWarn) { if (ATTR_RANGE_CHECK) return getAttrName(attrEnum); static THREAD_P char noSuchAttr[20]; sprintf(noSuchAttr, "**%d**", attrEnum); if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. *CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(noSuchAttr); return noSuchAttr; } // ----------------------------------------------------------------------- // Find the enum value from its string representation in the defaults table. 
// ----------------------------------------------------------------------- enum DefaultConstants NADefaults::lookupAttrName(const char *name, Int32 errOrWarn, Int32 *position) { NAString attrName(name); TrimNAStringSpace(attrName, FALSE, TRUE); // trim trailing blanks only attrName.toUpper(); // start with the full range of defaultDefaults size_t lo = 0; size_t hi = numDefaultAttributes(); size_t split; Int32 cresult; // perform a binary search in the ordered table defaultDefaults do { // compare the token with the middle entry in the range split = (lo + hi) / 2; cresult = attrName.compareTo(defaultDefaults[split].attrName); if (cresult < 0) { // token < split value, search first half of range hi = split; } else if (cresult > 0) { if (lo == split) // been there, done that { CMPASSERT(lo == hi-1); break; } // token > split value, search second half of range lo = split; } } while (cresult != 0 && lo < hi); if (position != 0) #pragma nowarn(1506) // warning elimination *position = split; #pragma warn(1506) // warning elimination // if the last comparison result was equal, return value at "split" if (cresult == 0) return defaultDefaults[split].attrEnum; // otherwise the string has no corresponding enum value if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. *CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(attrName); return __INVALID_DEFAULT_ATTRIBUTE; // negative } #define WIDEST_CPUARCH_VALUE 30 // also wider than any utoa_() result static void utoa_(UInt32 val, char *buf) { sprintf(buf, "%u", val); } static void itoa_(Int32 val, char *buf) { sprintf(buf, "%d", val); } static void ftoa_(float val, char *buf) { snprintf(buf, WIDEST_CPUARCH_VALUE, "%0.2f", val); } // Updates the system parameters in the defaultDefaults table. void NADefaults::updateSystemParameters(NABoolean reInit) { static const char *arrayOfSystemParameters[] = { "DEF_CPU_ARCHITECTURE", "DEF_DISCS_ON_CLUSTER", "DEF_INSTRUCTIONS_SECOND", "DEF_PAGE_SIZE", "DEF_LOCAL_CLUSTER_NUMBER", "DEF_LOCAL_SMP_NODE_NUMBER", "DEF_NUM_SMP_CPUS", "MAX_ESPS_PER_CPU_PER_OP", "DEFAULT_DEGREE_OF_PARALLELISM", "DEF_NUM_NODES_IN_ACTIVE_CLUSTERS", // this is deliberately not in the list: "DEF_CHUNK_SIZE", "DEF_NUM_BM_CHUNKS", "DEF_PHYSICAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_TOTAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_VIRTUAL_MEMORY_AVAILABLE" , "USTAT_IUS_PERSISTENT_CBF_PATH" }; //returned in KB not bytes char valuestr[WIDEST_CPUARCH_VALUE]; // Set up global cluster information. setUpClusterInfo(CmpCommon::contextHeap()); // Extract SMP node number and cluster number where this arkcmp is running. short nodeNum = 0; Int32 clusterNum = 0; OSIM_getNodeAndClusterNumbers(nodeNum, clusterNum); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. 
if (!cmpCurrentContext->isStandalone()) { size_t numElements = sizeof(arrayOfSystemParameters) / sizeof(char *); for (size_t i = 0; i < numElements; i++) { Int32 j; // perform a lookup for the string, using a binary search lookupAttrName(arrayOfSystemParameters[i], -1, &j); CMPASSERT(j >= 0); if(reInit) NADELETEBASIC(defaultDefaults[j].value,NADHEAP); char *newValue = new (GetCliGlobals()->exCollHeap()) char[WIDEST_CPUARCH_VALUE]; newValue[0] = '\0'; defaultDefaults[j].value = newValue; switch(defaultDefaults[j].attrEnum) { case DEF_CPU_ARCHITECTURE: switch(gpClusterInfo->cpuArchitecture()) { // 123456789!1234567890@123456789 case CPU_ARCH_INTEL_80386: strcpy(newValue, "INTEL_80386"); break; case CPU_ARCH_INTEL_80486: strcpy(newValue, "INTEL_80486"); break; case CPU_ARCH_PENTIUM: strcpy(newValue, "PENTIUM"); break; case CPU_ARCH_PENTIUM_PRO: strcpy(newValue, "PENTIUM_PRO"); break; case CPU_ARCH_MIPS: strcpy(newValue, "MIPS"); break; case CPU_ARCH_ALPHA: strcpy(newValue, "ALPHA"); break; case CPU_ARCH_PPC: strcpy(newValue, "PPC"); break; default: strcpy(newValue, "UNKNOWN"); break; } if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j], FALSE); break; case DEF_DISCS_ON_CLUSTER: strcpy(newValue, "8"); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_PAGE_SIZE: utoa_(gpClusterInfo->pageSize(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_CLUSTER_NUMBER: utoa_(clusterNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_SMP_NODE_NUMBER: utoa_(nodeNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_NUM_SMP_CPUS: utoa_(gpClusterInfo->numberOfCpusPerSMP(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEFAULT_DEGREE_OF_PARALLELISM: { Lng32 x = 2; utoa_(x, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case MAX_ESPS_PER_CPU_PER_OP: { float espsPerCore = computeNumESPsPerCore(FALSE); ftoa_(espsPerCore, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case DEF_NUM_NODES_IN_ACTIVE_CLUSTERS: utoa_(gpClusterInfo->numOfPhysicalSMPs(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_PHYSICAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->physicalMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_TOTAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->totalMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_VIRTUAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->virtualMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). 
updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_NUM_BM_CHUNKS: { UInt32 numChunks = (UInt32) (gpClusterInfo->physicalMemoryAvailable() / def_DEF_CHUNK_SIZE / 4); utoa_(numChunks, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case DEF_INSTRUCTIONS_SECOND: { Int32 frequency, speed; frequency = gpClusterInfo->processorFrequency(); switch (gpClusterInfo->cpuArchitecture()) { case CPU_ARCH_PENTIUM_PRO: speed = (Int32) (frequency * 0.5); break; case CPU_ARCH_PENTIUM: speed = (Int32) (frequency * 0.4); break; default: speed = (Int32) (frequency * 0.3); break; } itoa_(speed, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case USTAT_IUS_PERSISTENT_CBF_PATH: { // set the CQD it to $HOME/cbfs const char* home = getenv("HOME"); if ( home ) { str_cat(home, "/cbfs", newValue); } } break; default: #ifndef NDEBUG cerr << "updateSystemParameters: no case for " << defaultDefaults[j].attrName << endl; #endif break; } // switch (arrayOfSystemParameters) } // for } // isStandalone } // updateSystemParameters() //============================================================================== // Get SMP node number and cluster number on which this arkcmp.exe is running. //============================================================================== void NADefaults::getNodeAndClusterNumbers(short& nodeNum, Int32& clusterNum) { SB_Phandle_Type pHandle; Int32 error = XPROCESSHANDLE_GETMINE_(&pHandle); Int32 nodeNumInt; // XPROCESSHANDLE_DECOMPOSE_ takes an integer. Int32 pin; error = XPROCESSHANDLE_DECOMPOSE_(&pHandle, &nodeNumInt, &pin, &clusterNum); nodeNum = nodeNumInt; // Store 4-byte integer back to short integer CMPASSERT(error == 0); } inline static NABoolean initializeSQLdone() { return FALSE; } // Setup for readFromSQLTable(): // #include "SQLCLIdev.h" const SQLMODULE_ID __SQL_mod_866668761818000 = { /* version */ SQLCLI_CURRENT_VERSION, /* module name */ "HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.READDEF_N29_000", /* time stamp */ 866668761818000LL, /* char set */ "ISO88591", /* name length */ 47 }; static const Int32 MAX_VALUE_LEN = 1000; // Read the SQL defaults table, to layer on further defaults. // // [1] This is designed such that it can be called multiple times // (a site-wide defaults table, then a user-specific one, e.g.) // and by default it will supersede values read/computed from earlier tables. // // [2] It can also be called *after* CQD's have been issued // (e.g. from the getCatalogAndSchema() method) // and by default it will supersede values from earlier tables // but *not* explicitly CQD-ed settings. // // This default behavior is governed by the overwrite* arguments in // various methods (see the .h file). Naturally you can override such behavior, // e.g., if you wanted to reset to an earlier state, erasing all user CQD's. // void NADefaults::readFromSQLTable(const char *tname, Provenance overwriteIfNotYet, Int32 errOrWarn) { char value[MAX_VALUE_LEN + 1]; // CMPASSERT(MAX_VALUE_LEN >= ComMAX_2_PART_EXTERNAL_UCS2_NAME_LEN_IN_NAWCHARS); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. if (!cmpCurrentContext->isStandalone()) { Lng32 initialErrCnt = CmpCommon::diags()->getNumber(); // Set this *before* doing any insert()'s ... 
currentState_ = READ_FROM_SQL_TABLE; Int32 loop_here=0; while (loop_here > 10) { loop_here++; if (loop_here > 1000) loop_here=100; } if (tname) { NABoolean isSQLTable = TRUE; if (*tname == ' ') { // called from NADefaults::readFromFlatFile() isSQLTable = FALSE; // -- see kludge in .h file! tname++; } char attrName[101]; // column ATTRIBUTE VARCHAR(100) UPSHIFT Int32 sqlcode; static THREAD_P struct SQLCLI_OBJ_ID __SQL_id0; FILE *flatfile = NULL; if (isSQLTable) { init_SQLCLI_OBJ_ID(&__SQL_id0, SQLCLI_CURRENT_VERSION, cursor_name, &__SQL_mod_866668761818000, "S1", 0, SQLCHARSETSTRING_ISO88591, 2); /* EXEC SQL OPEN S1; See file NADefaults.mdf for cursor declaration */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_Exec(&__SQL_id0,NULL,1,tname,NULL); } else { flatfile = fopen(tname, "r"); sqlcode = flatfile ? 0 : -ABS(arkcmpErrorFileOpenForRead); } /* EXEC SQL FETCH S1 INTO :attrName, :value; */ // Since the DEFAULTS table is PRIMARY KEY (SUBSYSTEM, ATTRIBUTE), // we'll fetch (scanning the clustering index) // CATALOG before SCHEMA; this is important if user has rows like // ('CATALOG','c1') and ('SCHEMA','c2.sn') -- // the schema setting must supersede the catalog one. // We should also put an ORDER BY into the cursor decl in the .mdf, // to handle user-created DEFAULTS tables w/o a PK. if (sqlcode >= 0) if (isSQLTable) { sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); if (sqlcode >= 0) readFromSQDefaultsTable_ = TRUE; } else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } // Ignore warnings except for end-of-data while (sqlcode >= 0 && sqlcode != +100) { NAString v(value); // skip comments, indicated by a # if (attrName[0] != '#') validateAndInsert(attrName, v, FALSE, errOrWarn, overwriteIfNotYet); /* EXEC SQL FETCH S1 INTO :attrName, :value; */ if (isSQLTable) sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } } if (sqlcode < 0 && errOrWarn && initializeSQLdone()) { if (ABS(sqlcode) == ABS(CLI_MODULEFILE_OPEN_ERROR) && cmpCurrentContext->isInstalling()) { // Emit no warning when (re)installing, // because obviously the module will not exist before we have // (re)arkcmp'd it! } else { // 2001 Error $0 reading table $1. Using $2 values. CollIndex n = tablesRead_.entries(); const char *errtext = n ? tablesRead_[n-1].data() : "default-default"; *CmpCommon::diags() << DgSqlCode(ERRWARN(2001)) << DgInt0(sqlcode) << DgTableName(tname) << DgString0(errtext); } } if (isSQLTable) { /* EXEC SQL CLOSE S1; */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_CloseStmt(&__SQL_id0); // The above statement should not start any transactions because // it uses read uncommitted access. If it ever changes, then we // would need to commit it at this time. } } // tname if (initialErrCnt < CmpCommon::diags()->getNumber() && errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2059)) << DgString0(tname ? tname : ""); } // isStandalone } // NADefaults::readFromSQLTable() void NADefaults::readFromSQLTables(Provenance overwriteIfNotYet, Int32 errOrWarn) { NABoolean cat = FALSE; NABoolean sch = FALSE; if (getToken(MODE_SEABASE) == DF_ON && !readFromSQDefaultsTable()) { // Read system defaults from configuration file. 
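// (i.e. $TRAF_HOME/etc/SQSystemDefaults.conf, built just below and handed
//  to readFromFlatFile())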
// keep this name in sync with file cli/SessionDefaults.cpp NAString confFile(getenv("TRAF_HOME")); confFile += "/etc/SQSystemDefaults.conf"; readFromFlatFile(confFile, overwriteIfNotYet, errOrWarn); tablesRead_.insert(confFile); CmpSeabaseDDL cmpSBD((NAHeap *)heap_, FALSE); Lng32 hbaseErr = 0; NAString hbaseErrStr; Lng32 errNum = cmpSBD.validateVersions(this, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &hbaseErr, &hbaseErrStr); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); // set authorization state NABoolean checkAllPrivTables = FALSE; errNum = cmpSBD.isPrivMgrMetadataInitialized(this,checkAllPrivTables); CmpCommon::context()->setAuthorizationState(errNum); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; CmpCommon::context()->hbaseErrNum() = hbaseErr; CmpCommon::context()->hbaseErrStr() = hbaseErrStr; } } currentState_ = SET_BY_CQD; // enter the next state... // Make self fully consistent, by executing deferred actions last of all getSqlParser_NADefaults(); } // NADefaults::readFromSQLTables() // This method is used by SchemaDB::initPerStatement const char * NADefaults::getValueWhileInitializing(Int32 attrEnum) { // We can't rely on our state_ because SQLC might have called CQD::bindNode() // which does a setState(SET_BY_CQD)... if (!tablesRead_.entries()) if (getProvenance(attrEnum) < SET_BY_CQD) readFromSQLTables(SET_BY_CQD); return getValue(attrEnum); } // This method is used by SchemaDB::initPerStatement *and* // by CmpCommon, CmpStatement, and SQLC/SQLCO. void NADefaults::getCatalogAndSchema(NAString &cat, NAString &sch) { cat = getValueWhileInitializing(CATALOG); sch = getValueWhileInitializing(SCHEMA); } // Should be called only privately and by DefaultValidator! Int32 NADefaults::validateFloat(const char *value, float &result, Int32 attrEnum, Int32 errOrWarn) const { Int32 n = -1; // NT's scanf("%n") is not quite correct; hence this code-around sscanf(value, "%g%n", &result, &n); if (n > 0 && value[n] == '\0') { switch (attrEnum) { case HIVE_INSERT_ERROR_MODE: { Lng32 v = str_atoi(value, str_len(value)); if (v >= 0 && v <= 3) return TRUE; } break; default: return TRUE; // a valid float } } NAString v(value); NABoolean silentIf = (errOrWarn == SilentIfSYSTEM); if (silentIf) errOrWarn = 0/*silent*/; NABoolean useSYSTEM = (token(attrEnum, v, TRUE, errOrWarn) == DF_SYSTEM); if (useSYSTEM && silentIf) // ValidateNumeric is caller return SilentIfSYSTEM; // special it-is-valid return! if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum, errOrWarn)); if (useSYSTEM) { // programmer error CMPASSERT("Numeric attr allows SYSTEM -- you need to call token() first to see if its current value is this keyword, and compute your system default value!" == NULL); } // ensure an out-of-range error if domainMatch or ValidateNumeric is called result = -FLT_MAX; return FALSE; // not valid } NABoolean NADefaults::insert(Int32 attrEnum, const NAString &value, Int32 errOrWarn) { // private method; callers have all already done this: ATTR_RANGE_ASSERT; assert(errOrWarn != SilentIfSYSTEM); // yeh private, but just in case // Update cache: // (Re)validate that new value is numeric. // Useful if programmer did not correctly specify the DefaultValidator for // this attr in DefaultDefaults. 
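// (keeping the cached float in currentFloats_[] in step with the new string
//  value, so later getFloat()/getAsLong() calls see the updated setting)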
// if (currentFloats_[attrEnum]) { float result; if (validateFloat(value, result, attrEnum, errOrWarn)) *currentFloats_[attrEnum] = result; else return FALSE; // not a valid float } // Update cache for DefaultToken by deallocating the cached entry. if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } // If we're past the read-from-SQLTable phase, then // the first CQD of a given attr must first save the from-SQLTable value, // to which the user can RESET if desired. // if (currentState_ >= SET_BY_CQD && !resetToDefaults_[attrEnum]) { NAString currValStr(currentDefaults_[attrEnum]); Lng32 currValLen = str_len(currValStr) + 1; char *pCurrVal = new NADHEAP char[currValLen]; str_cpy_all(pCurrVal, currValStr, currValLen); resetToDefaults_[attrEnum] = pCurrVal; } char *newVal = NULL; Lng32 newValLen = str_len(value) + 1; if (provenances_[attrEnum] > INIT_DEFAULT_DEFAULTS) { Lng32 oldValLen = str_len(currentDefaults_[attrEnum]) + 1; if (oldValLen >= newValLen && oldValLen < newValLen + 100) newVal = const_cast<char*>(currentDefaults_[attrEnum]); // reuse, to reduce mem frag else NADELETEBASIC(currentDefaults_[attrEnum], NADHEAP); } if (!newVal) newVal = new NADHEAP char[newValLen]; str_cpy_all(newVal, value, newValLen); currentDefaults_[attrEnum] = newVal; // when the parser flag is on for a set-once CQD // set its provenance as INIT_DEFAULT_DEFAULTS, // so the user can set it once later if ( isSetOnceAttribute(attrEnum) && Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) ) { provenances_[attrEnum] = INIT_DEFAULT_DEFAULTS; } else { provenances_[attrEnum] = currentState_; } return TRUE; } NADefaults::Provenance NADefaults::getProvenance(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return (Provenance)provenances_[attrEnum]; } NABoolean NADefaults::getValue(Int32 attrEnum, NAString &result) const { ATTR_RANGE_ASSERT; result = currentDefaults_[attrEnum]; return TRUE; // we always have a STRING REPRESENTATION value } NAString NADefaults::getString(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } const char * NADefaults::getValue(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } NABoolean NADefaults::getFloat(Int32 attrEnum, float &result) const { ATTR_RANGE_ASSERT; if (currentFloats_[attrEnum]) { result = *currentFloats_[attrEnum]; } else if (validateFloat(currentDefaults_[attrEnum], result, attrEnum)) { currentFloats_[attrEnum] = new NADHEAP float; // cache the result *currentFloats_[attrEnum] = result; } else { return FALSE; // result is neg, from failed validateFloat() } return TRUE; } double NADefaults::getAsDouble(Int32 attrEnum) const { // No domainMatch() needed: any float or double (or int or uint) is okay; // getFloat()/validateFloat() will disallow any non-numerics. 
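// (contrast with getAsLong()/getAsULong() below, which do call domainMatch()
//  and CMPBREAK on an int/uint domain mismatch)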
float flt; getFloat(attrEnum, flt); return double(flt); } Lng32 NADefaults::getAsLong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_INT, &flt)) { CMPBREAK; } return Lng32(flt); } ULng32 NADefaults::getAsULong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_UINT, &flt)) { CMPBREAK; } return (ULng32)(flt); } ULng32 NADefaults::getNumOfESPsPerNode() const { return (ULng32)MAXOF(ceil(getNumOfESPsPerNodeInFloat()), 1); } float NADefaults::getNumOfESPsPerNodeInFloat() const { double maxEspPerCpuPerOp = getAsDouble(MAX_ESPS_PER_CPU_PER_OP); CollIndex cores = ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware()) ) ? getAsLong(DEF_NUM_SMP_CPUS) : gpClusterInfo->numberOfCpusPerSMP(); return float(maxEspPerCpuPerOp * cores); } ULng32 NADefaults::getTotalNumOfESPsInCluster(NABoolean& fakeEnv) const { fakeEnv = FALSE; if (getToken(PARALLEL_NUM_ESPS, 0) != DF_SYSTEM ) { fakeEnv = TRUE; return getAsLong(PARALLEL_NUM_ESPS); } float espsPerNode = getNumOfESPsPerNodeInFloat(); CollIndex numOfNodes = gpClusterInfo->numOfSMPs(); if ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware())) { fakeEnv = TRUE; numOfNodes = getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS); } return MAXOF(ceil(espsPerNode * numOfNodes), 1); } NABoolean NADefaults::domainMatch(Int32 attrEnum, Int32 expectedType/*DefaultValidatorType*/, float *flt) const { if (validator(attrEnum)->getType() == expectedType) return TRUE; // yes, domains match // Emit error messages only if the value is actually out-of-range. // // Users (optimizer code) should REALLY be using 'unsigned long' fields // and calling getAsULong, instead of using 'long' fields to retrieve // unsigned(DDui*) attr values via getAsLong ... // // LCOV_EXCL_START // if we get here the compiler will crash if (flt) { DefaultValidator *validator = NULL; if (expectedType == VALID_INT) validator = (DefaultValidator *)&validateInt; else if (expectedType == VALID_UINT) validator = (DefaultValidator *)&validateUI; // Explicitly check for TRUE here -- // both FALSE/error and SilentIfSYSTEM are out-of-range/out-of-domain // from this method's point of view. if (validator) if (validator->validate( currentDefaults_[attrEnum], this, attrEnum, -1, flt) == TRUE) return TRUE; // domain mismatch, but value *is* in the domain range } // fall thru to emit additional failure info *CmpCommon::diags() << DgSqlCode(+2058) // emit a mismatch WARNING << DgString0(lookupAttrName(attrEnum)) << DgString1(validator(attrEnum)->getTypeText()) << DgString2(DefaultValidator::getTypeText( DefaultValidatorType(expectedType))); #ifndef NDEBUG cerr << "Warning[2058] " << lookupAttrName(attrEnum) << " " << validator(attrEnum)->getTypeText() << " " << DefaultValidator::getTypeText( DefaultValidatorType(expectedType)) << " " << (flt ? *flt : 123.45) << endl; #endif // LCOV_EXCL_STOP return FALSE; } // CONTROL QUERY DEFAULT attr RESET; // resets the single attr to the value it had right after we read all // the DEFAULTS tables, // or the value it had right before a CQD * RESET RESET. // CONTROL QUERY DEFAULT * RESET; // resets all attrs to the values they had by same criteria as above. // CONTROL QUERY DEFAULT * RESET RESET; // resets the "reset-to" values so that all current values become the // effective "reset-to"'s -- i.e, the current values can't be lost // on the next CQD * RESET; // Useful for apps that dynamically send startup settings that ought // to be preserved -- ODBC and SQLCI do this. 
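// A rough mapping of those statements onto the code (illustrative only):
//   CQD attr RESET;      -> single-attr reset path inside validateAndInsert()
//   CQD * RESET;         -> resetAll(value, 1, ...) below
//   CQD * RESET RESET;   -> resetAll(value, 2, ...) below, which clears the
//                           saved resetToDefaults_[] entries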
// void NADefaults::resetAll(NAString &value, NABoolean reset, Int32 errOrWarn) { size_t i, numAttrs = numDefaultAttributes(); if (reset == 1) { // CQD * RESET; (not RESET RESET) setResetAll(TRUE); for (i = 0; i < numAttrs; i++) { const char * attributeName = defaultDefaults[i].attrName; DefaultConstants attrEnum = lookupAttrName(attributeName, errOrWarn); if (isNonResetableAttribute(attributeName)) continue; validateAndInsert(attributeName, value, TRUE, errOrWarn); } // if DEFAULT_SCHEMA_NAMETYPE=USER after CQD * RESET // set SCHEMA to LDAP_USERNAME // if SCHEMA has not been specified by user if ( (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && schSetByNametype() ) { setSchemaAsLdapUser(); } setResetAll(FALSE); } else if (reset == 2) { for (i = 0; i < numAttrs; i++) { if (resetToDefaults_[i]) { // CONTROL QUERY DEFAULT * RESET RESET; -- this code cloned below // Can't reset prov, because to which? // provenances_[i] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[i], NADHEAP); resetToDefaults_[i] = NULL; } } } else { CMPASSERT(!reset); } } // Reset to default-defaults, as if readFromSQLTables() had not executed, // but setting state and provenance so no future reads will be triggered. // See StaticCompiler and Genesis 10-990204-2469 above for motivation. void NADefaults::undoReadsAndResetToDefaultDefaults() { initCurrentDefaultsWithDefaultDefaults(); } NABoolean NADefaults::isReadonlyAttribute(const char* attrName) const { if ((( stricmp(attrName, "ISO_MAPPING") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 )) && ( CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_ON )) return FALSE; // for internal development and testing purposes if (( stricmp(attrName, "ISO_MAPPING") == 0 )|| ( stricmp(attrName, "NATIONAL_CHARSET") == 0 ) || ( stricmp(attrName, "VALIDATE_VIEWS_AT_OPEN_TIME") == 0 ) || ( stricmp(attrName, "USER_EXPERIENCE_LEVEL") == 0 ) || ( stricmp(attrName, "POS_DISKS_IN_SEGMENT") == 0 ) || ( stricmp(attrName, "BMO_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN") == 0 ) || ( stricmp(attrName, "BMO_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY") == 0 ) || ( stricmp(attrName, "BMO_MEMORY_LIMIT_LOWER_BOUND_SORT") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) ) return TRUE; if (strlen(attrName) > 0) { DefaultConstants v = lookupAttrName(attrName, 0, 0); if ((v != __INVALID_DEFAULT_ATTRIBUTE) && (getFlags(v) & DEFAULT_IS_SSD)) return TRUE; } return FALSE; } // these defaults cannot be reset or set to FALSE through a cqd. NABoolean NADefaults::isNonResetableAttribute(const char* attrName) const { if (( stricmp(attrName, "IS_SQLCI") == 0 ) || ( stricmp(attrName, "NVCI_PROCESS") == 0 ) || ( stricmp(attrName, "SESSION_ID") == 0 ) || ( stricmp(attrName, "LDAP_USERNAME") == 0 ) || ( stricmp(attrName, "VOLATILE_SCHEMA_IN_USE") == 0 ) || ( stricmp(attrName, "SESSION_USERNAME") == 0 ) ) return TRUE; return FALSE; } // these defaults can be set only once by user. 
NABoolean NADefaults::isSetOnceAttribute(Int32 attrEnum) const { if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY || attrEnum == PUBLISHING_ROLES ) return TRUE; return FALSE; } void NADefaults::resetSessionOnlyDefaults() { NAString value; validateAndInsert("NVCI_PROCESS", value, 3, 0); } // Parameter <reset> must not be a reference (&); // see <value = ... fall thru> below. enum DefaultConstants NADefaults::validateAndInsert(const char *attrName, NAString &value, NABoolean reset, Int32 errOrWarn, Provenance overwriteIfNotYet) { NABoolean overwrite = FALSE; NABoolean isJDBC = FALSE; NABoolean isODBC = FALSE; if (ActiveSchemaDB()) { isJDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE); isODBC = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE); } if (reset && !attrName[0]) { // CONTROL QUERY DEFAULT * RESET overwrite = currentState_ < overwriteIfNotYet; if (overwrite) resetAll(value, reset, errOrWarn); return (DefaultConstants)0; // success } // Perform a lookup for the string, using a binary search. DefaultConstants attrEnum = lookupAttrName(attrName, errOrWarn); if (attrEnum >= 0) { // valid attrName // ignore DEFAULT_SCHEMA_ACCESS_ONLY if it is in system defaults if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY && getState() < SET_BY_CQD ) return attrEnum; // do the following check when // this is the primary mxcmp // and INTERNAL_QUERY_FROM_EXEUTIL is not set if (!CmpCommon::context()->isSecondaryMxcmp() && !Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) { // This logic will catch if the set-once CQD // is set, but the ALLOW_SET_ONCE_DEFAULTS parserflags // are not set. This is absolutely necessary for security // to ensure that the correct parserflags are set. if ((isSetOnceAttribute(attrEnum)) && (!isResetAll()) && // no error msg for cqd * reset (NOT Get_SqlParser_Flags(ALLOW_SET_ONCE_DEFAULTS))) { *CmpCommon::diags() << DgSqlCode(-30042) << DgString0(attrName); return attrEnum; } // if DEFAULT_SCHEMA_ACCESS_ONLY is on, // users cannot change the following CQDs if ( getState() >= SET_BY_CQD && getToken(DEFAULT_SCHEMA_ACCESS_ONLY) == DF_ON ) { if (attrEnum == SCHEMA || attrEnum == PUBLIC_SCHEMA_NAME || attrEnum == DEFAULT_SCHEMA_NAMETYPE || attrEnum == PUBLISHING_ROLES) { if (!isResetAll()) // no error msg for cqd * reset *CmpCommon::diags() << DgSqlCode(-30043) << DgString0(attrName); return attrEnum; } } } else { // ignore LAST0_MODE cqd if we are in secondary mxcmp or if // internal_query_from_exeutil is set. This cqd is not meant // to apply in these cases if ( attrEnum == LAST0_MODE ) return attrEnum; } overwrite = getProvenance(attrEnum) < overwriteIfNotYet; // Put value into canonical form (trimmed, upcased where pertinent). // // Possibly revert to initial default default value -- see NOTE 3 up above. // Note further that ANSI names cannot revert on values of // 'SYSTEM' or 'ENABLE', as those are legal cat/sch/tbl names, // nor can they revert on '' (empty/blank), as ANSI requires us to // emit a syntax error for this. // // Possibly RESET to read-from-table value (before any CQD value). // TrimNAStringSpace(value); if (validator(attrEnum) != &validateAnsiName && !reset) { validator(attrEnum)->applyUpper(value); if (isSynonymOfSYSTEM(attrEnum, value)) value = getDefaultDefaultValue(attrEnum); else if (isSynonymOfRESET(value)) // CQD attr 'RESET'; ... 
reset = 1; } if (reset) { // CQD attr RESET; if ((isNonResetableAttribute(attrName)) && (reset != 3)) return attrEnum; if (!resetToDefaults_[attrEnum]) { if (overwrite) value = currentDefaults_[attrEnum]; // return actual val to caller if (attrEnum == ISOLATION_LEVEL) { // reset this in the global area TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVE. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //else if (attrEnum == ISOLATION_LEVEL_FOR_UPDATES) // { // // reset this in the global area // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); // } return attrEnum; } value = resetToDefaults_[attrEnum]; // fall thru, REINSERT this val } if (attrEnum == CATALOG) { if (!setCatalog(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } } } else if (attrEnum == SCHEMA) { if (!setSchema(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. 
NAString cat(getValue(CATALOG)); CmpCommon::context()->sqlSession()->setVolatileCatalogName(cat); } } } else { if ( attrEnum == MAX_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MAX_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 minLength; switch (attrEnum) { case MAX_LONG_VARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_VARCHAR_DEFAULT_SIZE); break; case MAX_LONG_WVARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMaxLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMaxLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMaxLength < minLength ) { *CmpCommon::diags() << DgSqlCode(-2030) << DgInt0((Lng32)minLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if ( attrEnum == MIN_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MIN_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 maxLength; switch (attrEnum) { case MIN_LONG_VARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_VARCHAR_DEFAULT_SIZE); break; case MIN_LONG_WVARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMinLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMinLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMinLength > maxLength ) { *CmpCommon::diags() << DgSqlCode(-2029) << DgInt0((Lng32)maxLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if (errOrWarn && (attrEnum == ROUNDING_MODE)) { if (NOT ((value.length() == 1) && ((*value.data() == '0') || (*value.data() == '1') || (*value.data() == '2')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if ( attrEnum == SCRATCH_MAX_OPENS_HASH || attrEnum == SCRATCH_MAX_OPENS_SORT ) { if (NOT ((value.length() == 1) && ((*value.data() == '1') || (*value.data() == '2') || (*value.data() == '3') || (*value.data() == '4')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if (attrEnum != __INVALID_DEFAULT_ATTRIBUTE) { // We know that the MP_COLLATIONS validator emits only warnings // and always returns TRUE. On the validate-but-do-not-insert step // (CQD compilation), those warnings will be seen by the user. // On the validate-AND-insert (CQD execution), there is no need // to repeat them (besides, that causes Executor to choke on the // warnings in the diags and say 'Error fetching from TCB tree'). Int32 isValid = TRUE; if (!overwrite || currentState_ < SET_BY_CQD || validator(attrEnum) != &validateCollList) isValid = validator(attrEnum)->validate(value, this, attrEnum, errOrWarn); // if an internal reset is being done, then make it a valid attr // even if the 'validate' method above returned invalid. 
if ((!isValid) && (isNonResetableAttribute(attrName)) && (reset == 3)) { isValid = TRUE; } if (!isValid) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else if (overwrite) { if (isValid == SilentIfSYSTEM) { // defDef value was "SYSTEM" or "" // Undo any caching from getFloat() NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); currentFloats_[attrEnum] = NULL; // Undo any caching from getToken() NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; // Now fall thru to insert the string "SYSTEM" or "" } if (!insert(attrEnum, value, errOrWarn)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } // overwrite (i.e. insert) } } // not special val/ins for CAT, SCH, or MPLOC } // valid attrName if (attrEnum >= 0) { if (overwrite) { if ((! reset) && (currentState_ == SET_BY_CQD)) { // indicate that this attribute was set by a user CQD. setUserDefault(attrEnum, TRUE); } switch (attrEnum) { case CATALOG: case SCHEMA: break; case ISOLATION_LEVEL: { // Ansi 14.1 SR 4. See comexe/ExControlArea::addControl(). //## I now think this implementation is wrong //## because this is setting GLOBAL state //## for something that should be CONTEXT-dependent. //## Will cause us headaches later, when we //## make arkcmp be a multi-context multi-threaded server. TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } break; // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVEL. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //case ISOLATION_LEVEL_FOR_UPDATES: //{ // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); //} //break; case ALLOW_INCOMPATIBLE_ASSIGNMENT: case ALLOW_INCOMPATIBLE_COMPARISON: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); } break; case MODE_SPECIAL_1: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); // find_suitable_key to be turned off in this mode, unless // it has been explicitely set. 
if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } } break; case MODE_SPECIAL_4: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); insert(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, val, errOrWarn); NAString csVal; if (value == "ON") csVal = SQLCHARSETSTRING_UTF8; else csVal = ""; validateAndInsert("TRAF_DEFAULT_COL_CHARSET", csVal, FALSE, errOrWarn); NAString notVal; if (value == "ON") notVal = "OFF"; else notVal = "ON"; insert(TRAF_COL_LENGTH_IS_CHAR, notVal, errOrWarn); NAString costVal1; NAString costVal2; if (value == "ON") { costVal1 = "8.0"; costVal2 = "16.0" ; } else { costVal1 = "1.0"; costVal2 = "1.0" ; } validateAndInsert("NCM_IND_JOIN_COST_ADJ_FACTOR", costVal1, FALSE, errOrWarn); validateAndInsert("NCM_IND_SCAN_COST_ADJ_FACTOR", costVal2, FALSE, errOrWarn); if (value == "ON") Set_SqlParser_Flags(IN_MODE_SPECIAL_4); else Reset_SqlParser_Flags(IN_MODE_SPECIAL_4); } break; case MODE_SEABASE: { if (value == "ON") { if (NOT seabaseDefaultsTableRead()) { CmpSeabaseDDL cmpSBD((NAHeap *)heap_); Lng32 errNum = cmpSBD.validateVersions(this); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; } } NAString sbCat = getValue(SEABASE_CATALOG); insert(SEABASE_VOLATILE_TABLES, "ON", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); insert(UPD_SAVEPOINT_ON_ERROR, "OFF", errOrWarn); } else { NAString defCat = getValue(CATALOG); insert(SEABASE_VOLATILE_TABLES, "OFF", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(defCat); insert(UPD_SAVEPOINT_ON_ERROR, "ON", errOrWarn); } } break; case MEMORY_LIMIT_QCACHE_UPPER_KB: CURRENTQCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_HISTCACHE_UPPER_KB: CURRCONTEXT_HISTCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPSTMT_UPPER_KB: STMTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPCTXT_UPPER_KB: CTXTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_NATABLECACHE_UPPER_KB: ActiveSchemaDB()->getNATableDB()->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case NAMETYPE: SqlParser_NADefaults_->NAMETYPE_ = token(NAMETYPE, value, TRUE); break; case NATIONAL_CHARSET: SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(value); break; case SESSION_ID: { CmpCommon::context()->sqlSession()->setSessionId(value); } break; case SESSION_USERNAME: { CmpCommon::context()->sqlSession()->setSessionUsername(value); } break; case SESSION_IN_USE: { CmpCommon::context()->sqlSession()->setSessionInUse ((getToken(attrEnum) == DF_ON)); } break; case SQLMX_REGRESS: { if (value == "ON") { insert(COMP_BOOL_157, "ON", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, "INTERNAL", errOrWarn); insert(MODE_SPECIAL_1, "OFF", errOrWarn); if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } char * env = getenv("SQLMX_REGRESS"); if (env) CmpCommon::context()->setSqlmxRegress(atoi(env)); else CmpCommon::context()->setSqlmxRegress(1); } else { insert(COMP_BOOL_157, "OFF", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, 
"EXTERNAL", errOrWarn); CmpCommon::context()->setSqlmxRegress(0); } } break; case VOLATILE_CATALOG: { CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } break; case VOLATILE_SCHEMA_IN_USE: { CmpCommon::context()->sqlSession()->setVolatileSchemaInUse ((getToken(attrEnum) == DF_ON)); } break; case ISO_MAPPING: { SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(value); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } break; case DEFAULT_CHARSET: { SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); } break; case QUERY_TEXT_CACHE: { // If public schema is in use, query text cache has to be off NAString pSchema = getValue(PUBLIC_SCHEMA_NAME); if (pSchema != "") value = "OFF"; } break; case PUBLIC_SCHEMA_NAME: { // when PUBLIC_SCHEMA is used, turn off Query Text Cache if ( (value != "") && !(getToken(QUERY_TEXT_CACHE) == DF_OFF) ) insert(QUERY_TEXT_CACHE, "OFF"); // when PUBLIC_SCHEMA is not used, reset to the default value if ( value == "" ) { NAString v(""); validateAndInsert("QUERY_TEXT_CACHE", v, TRUE); } } break; case LDAP_USERNAME: { // when the LDAP_USERNAME is set (first time by CLI) // if DEFAULT_SCHEMA_NAMETYPE is USER, set schema to LDAP_USERNAME if ( !value.isNull() && (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && !userDefault(SCHEMA) && // do not change user setting ( schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) ) // or changed by same CQD { setSchemaAsLdapUser(value); setSchByNametype(TRUE); } } break; case DEFAULT_SCHEMA_ACCESS_ONLY: { if ( value == "ON" ) { NAString schemaNameType = getValue(DEFAULT_SCHEMA_NAMETYPE); if ( schemaNameType == "USER" ) { setSchemaAsLdapUser(); } } } break; case DEFAULT_SCHEMA_NAMETYPE: { if ( userDefault(SCHEMA) ) // if SCHEMA has been changed by user, do nothing break; if ( value == "SYSTEM" ) // reset to default schema { if ( schSetByNametype() ) // only when schema was changed by this CQD { // do not change catSchSetToUserID_ flag Int32 preVal = catSchSetToUserID_; NAString v(""); validateAndInsert("SCHEMA", v, TRUE); catSchSetToUserID_ = preVal; } } if ( value == "USER" ) // set default schema to ldpa username { if ( schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) // or was changed by this CQD { setSchemaAsLdapUser(); setSchByNametype(TRUE); } } } break; case USTAT_IUS_PERSISTENT_CBF_PATH: { // if the CBF path is SYSTEM, set it to $HOME/cbfs if ( value == "SYSTEM" ) { const char* home = getenv("HOME"); if ( home ) { value = home; value += "/cbfs"; validateAndInsert("USTAT_IUS_PERSISTENT_CBF_PATH", value, FALSE); } } } break; case TRAF_LOAD_ERROR_LOGGING_LOCATION: { if (value.length() > 512) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } } break; case AGGRESSIVE_ESP_ALLOCATION_PER_CORE: { NABoolean useAgg = (getToken(attrEnum) == DF_ON); float numESPsPerCore = computeNumESPsPerCore(useAgg); char valuestr[WIDEST_CPUARCH_VALUE]; ftoa_(numESPsPerCore, valuestr); NAString val(valuestr); insert(MAX_ESPS_PER_CPU_PER_OP, val, errOrWarn); } break; // max char col length is 
defined in common/ComSmallDefs.h. // In special cases, it could be overridden. Internal use only or // use only under trafodion supervision. case TRAF_MAX_CHARACTER_COL_LENGTH: { NABoolean override = (getToken(TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE) == DF_ON); double d = atof(value.data()); if ((NOT override) && (NOT (d >= 0 && d <= MAX_CHAR_COL_LENGTH_IN_BYTES))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } } break; case TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE: { // if override is being turned off, reset max_char_len to default value. if (value == "OFF") { NAString val; validateAndInsert("TRAF_MAX_CHARACTER_COL_LENGTH", val, TRUE); } } break; default: break; } } // code to valid overwrite (insert) if (reset && overwrite) { // CONTROL QUERY DEFAULT attr RESET; -- this code cloned above // Can't reset prov, because to which? // provenances_[attrEnum] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[attrEnum], NADHEAP); resetToDefaults_[attrEnum] = NULL; } else if (!overwrite && errOrWarn && getProvenance(attrEnum) >= IMMUTABLE) { *CmpCommon::diags() << DgSqlCode(ERRWARN(2200)) << DgString0(lookupAttrName(attrEnum, errOrWarn)); } } // valid attrName return attrEnum; } // NADefaults::validateAndInsert() float NADefaults::computeNumESPsPerCore(NABoolean aggressive) { #define DEFAULT_ESPS_PER_NODE 2 // for conservation allocation #define DEFAULT_ESPS_PER_CORE 0.5 // for aggressive allocation // Make sure the gpClusterInfo points at an NAClusterLinux object. // In osim simulation mode, the pointer can point at a NAClusterNSK // object, for which the method numTSEsForPOS() is not defined. NAClusterInfoLinux* gpLinux = dynamic_cast<NAClusterInfoLinux*>(gpClusterInfo); assert(gpLinux); // cores per node Lng32 coresPerNode = gpClusterInfo->numberOfCpusPerSMP(); if ( aggressive ) { float totalMemory = gpLinux->totalMemoryAvailable(); // per Node, in KB totalMemory /= (1024*1024); // per Node, in GB totalMemory /= coresPerNode ; // per core, in GB totalMemory /= 2; // per core, 2GB per ESP return MINOF(DEFAULT_ESPS_PER_CORE, totalMemory); } else { Lng32 numESPsPerNode = DEFAULT_ESPS_PER_NODE; return (float)(numESPsPerNode)/(float)(coresPerNode); } // The following lines of code are comment out but retained for possible // future references. // // // number of POS TSE // Lng32 numTSEsPerCluster = gpLinux->numTSEsForPOS(); // // // cluster nodes // Lng32 nodesdPerCluster = gpClusterInfo->getTotalNumberOfCPUs(); // // // TSEs per node // Lng32 TSEsPerNode = numTSEsPerCluster/nodesdPerCluster; // // // // // For Linux/nt, we conservatively allocate ESPs per node as follows // // - 1 ESP per 2 cpu cores if cores are equal or less than TSEs // // - 1 ESP per TSE if number of cores is more than double the TSEs // // - 1 ESP per 2 TSEs if cores are more than TSEs but less than double the TSEs // // - 1 ESP per node. Only possible on NT or workstations // // - number of cores less than TSEs and there are 1 or 2 cpur cores per node // // - number of TSEs is less than cpu cores and there 1 or 2 TSEs per node. 
// // This case is probable if virtual nodes are used // // // TSEsPerNode is 0 for arkcmps started by the seapilot universal comsumers // // in this case we only consider cpu cores // if ( coresPerNode <= TSEsPerNode || TSEsPerNode == 0 ) // { // if (coresPerNode > 1) // numESPsPerNode = DEFAULT_ESPS_PER_NODE; // } // else if (coresPerNode > (TSEsPerNode*2)) // { // numESPsPerNode = TSEsPerNode; // } // else if (TSEsPerNode > 1) // { // numESPsPerNode = TSEsPerNode/2; // } // else // not really needed since numESPsPerNode is set to 1 from above // { // numESPsPerNode = DEFAULT_ESPS_PER_NODE; // } // // return (float)(numESPsPerNode)/(float)(coresPerNode); } enum DefaultConstants NADefaults::holdOrRestore (const char *attrName, Lng32 holdOrRestoreCQD) { DefaultConstants attrEnum = __INVALID_DEFAULT_ATTRIBUTE; if (holdOrRestoreCQD == 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } // Perform a lookup for the string, using a binary search. attrEnum = lookupAttrName(attrName, -1); if (attrEnum < 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } char * value = NULL; if (holdOrRestoreCQD == 1) // hold cqd { if (currentDefaults_[attrEnum]) { value = new NADHEAP char[strlen(currentDefaults_[attrEnum]) + 1]; strcpy(value, currentDefaults_[attrEnum]); } else { value = new NADHEAP char[strlen(defaultDefaults[defDefIx_[attrEnum]].value) + 1]; strcpy(value, defaultDefaults[defDefIx_[attrEnum]].value); } if (! heldDefaults_[attrEnum]) heldDefaults_[attrEnum] = new NADHEAP HeldDefaults(); heldDefaults_[attrEnum]->pushDefault(value); } else { // restore cqd from heldDefaults_ array, if it was held. if (! heldDefaults_[attrEnum]) return attrEnum; value = heldDefaults_[attrEnum]->popDefault(); if (! value) return attrEnum; // there is an odd semantic that if currentDefaults_[attrEnum] // is null, we leave it as null, but pop a held value anyway; // this semantic was preserved when heldDefaults_ was converted // to a stack. if (currentDefaults_[attrEnum]) { // do a validateAndInsert so the caches (such as currentToken_) // get updated and so appropriate semantic actions are taken. // Note that validateAndInsert will take care of deleting the // storage currently held by currentDefaults_[attrEnum]. NAString valueS(value); validateAndInsert(lookupAttrName(attrEnum), // sad that we have to do a lookup again valueS, FALSE); } NADELETEBASIC(value, NADHEAP); } return attrEnum; } const SqlParser_NADefaults *NADefaults::getSqlParser_NADefaults() { return SqlParser_NADefaults_; } static void setCatSchErr(NAString &value, Lng32 sqlCode, Int32 errOrWarn, NABoolean catErr = FALSE) { if (!sqlCode || !errOrWarn) return; TrimNAStringSpace(value); // prettify further (neater errmsg) *CmpCommon::diags() << DgSqlCode(ERRWARN(sqlCode)) << DgCatalogName(value) << DgSchemaName(value) << DgString0(value) << DgString1(value); if (value.first('"') == NA_NPOS) { // delimited names too complicated ! NAString namepart = value; size_t dot = value.first('.'); if (dot != NA_NPOS) { namepart.remove(dot); if (!IsSqlReservedWord(namepart)) { namepart = value; namepart.remove(0, dot+1); } } if (IsSqlReservedWord(namepart)) { *CmpCommon::diags() << DgSqlCode(ERRWARN(3128)) << DgString0(namepart) << DgString1(namepart); return; } } // must determine if the defaults have been set up before parseDML is called if (IdentifyMyself::GetMyName() == I_AM_UNKNOWN){ return; // diagnostic already put into diags above. 
} // Produce additional (more informative) syntax error messages, // trying delimited-value first and then possibly regular-value-itself. Parser parser(CmpCommon::context()); Lng32 errs = CmpCommon::diags()->getNumber(DgSqlCode::ERROR_); NAString pfx(catErr ? "SET CATALOG " : "SET SCHEMA "); NAString stmt; char c = *value.data(); if (c && c != '\"') { stmt = pfx; stmt += "\""; stmt += value; stmt += "\""; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } if (errs == CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) { stmt = pfx; stmt += value; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } // Change errors to warnings if errOrWarn is +1 (i.e. warning). if (errOrWarn > 0) NegateAllErrors(CmpCommon::diags()); } NABoolean NADefaults::setCatalog(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { setCatUserID(currentState_ == COMPUTED); // The input value is in external (Ansi) format. // If we are in the COMPUTED currentState_, // make the value strictly canonical, // and try non-delimited first, then delimited. // Prettify removes lead/trailing blanks, // and upcases where unquoted (for nicer errmsgs); // ComSchemaName parses/validates. // if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty() || // 0 name parts, if *any* error !nam.getCatalogNamePart().isEmpty()) { // 2 parts (cat.sch) is an error setCatSchErr(value, EXE_INVALID_CAT_NAME, errOrWarn, TRUE); return FALSE; // invalid value } else { // Get the 1 name part (the "schema" part as far as ComSchema knows...) if (overwrite) insert(CATALOG, nam.getSchemaNamePartAsAnsiString()); return TRUE; } } NABoolean NADefaults::setSchema(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { // if this is part of CQD *RESET and it was initialized with role name // do not change the following flags // to allow DEFAULT_SCHEMA_NAMETYPE to set its value if (!( schSetToUserID() && isResetAll() )) { setSchUserID(currentState_ == COMPUTED); setSchByNametype(FALSE); } if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty()) { // 0 name parts, if *any* error setCatSchErr(value, EXE_INVALID_SCH_NAME, errOrWarn); return FALSE; // invalid value } else { if (overwrite) insert(SCHEMA, nam.getSchemaNamePartAsAnsiString()); // If 2 parts, overwrite any prior catalog default if (!nam.getCatalogNamePart().isEmpty()) { if (overwrite) { insert(CATALOG, nam.getCatalogNamePartAsAnsiString()); if (currentState_ == SET_BY_CQD) { // indicate that this attribute was set by a user CQD. 
setUserDefault(CATALOG, TRUE); } } } return TRUE; } } NAString NADefaults::keyword(DefaultToken tok) { CMPASSERT(tok >= 0 && tok < DF_lastToken); return keywords_[tok]; } // Defaults Tokens // There is a set of keywords which can appear as values of Defaults entries // in the Defaults Table. We declare, for each such token, a string (the // keyword), and an enumeration value. The string values belong in an // array, DFkeywords, in sorted order. The idea is we can use binary // search in order to obtain the index of a string to the matching // entry in this sorted array. // // If we define the enumerations carefully (pay attention here!), then // that index we just found (see previous paragraph) is the enum value // of the token. // In simple words: this has to be in identical order with enum DefaultToken // in DefaultConstants.h const char *NADefaults::keywords_[DF_lastToken] = { "ACCUMULATED", "ADVANCED", "AGGRESSIVE", "ALL", "ANSI", "BEGINNER", "BOTH", "CLEAR", "DEBUG", "DISK", "DISK_POOL", "DUMP", "DUMP_MV", "EXTERNAL", "EXTERNAL_DETAILED", "FIRSTROW", "HARDWARE", "HEAP", "HIGH", "HYBRID", "IEEE", "INDEXES", "INTERNAL", "IQS", "JNI", "JNI_TRX", "KEYINDEXES", "LASTROW", "LATEST", "LEAF", "LOADNODUP", "LOCAL", "LOCAL_NODE", "LOG", "MAXIMUM", "MEDIUM", "MEDIUM_LOW", "MERGE", "MINIMUM", "MMAP", "MULTI_NODE", "MVCC", "NONE", "OFF", "ON", "OPENS_FOR_WRITE", "OPERATOR", "OPTIMAL", "ORDERED", "PERTABLE", "PRINT", "PRIVATE", "PUBLIC", "QS", "READ_COMMITTED", "READ_UNCOMMITTED", "RELEASE", "REMOTE", "REPEATABLE_READ", "REPLACE", "REPSEL", "RESOURCES", "RETURN", "ROOT", "SAMPLE", "SERIALIZABLE", "SHORTANSI", "SIMPLE", "SKIP", "SMD", "SOFTWARE", "SOURCE", "SQLMP", "SSCC", "SSD", "STOP", "SUFFIX", "SYSTEM", "TANDEM", "THRIFT", "USER", "VERTICAL", "WAIT", "WARN", "XML" }; // To call bsearch we must satisfy each of its arguments. Either // NULL comes back, or, comes back a pointer to the element which is // a true match for our key. bsearch.key is upperKey.data(). // bsearch.base is keywords_. nel is DF_lastToken. // The next argument is sizeof char*. Finally, the comparison // function can simply be the strcmp function. // // Note that this function makes heavy reliance on the idea that // the DefaultToken enumerations go up in sequence 0, 1, 2, 3... . // // We do the cast on strcmp because its signature from the header // file is: int (*)(const char *, const char *). In general, we're // doing a lot of type casting in here. 
static Int32 stringCompare(const void* s1, const void* s2) { return strcmp( * (char**) s1, * (char**) s2); } DefaultToken NADefaults::token(Int32 attrEnum, NAString &value, NABoolean valueAlreadyGotten, Int32 errOrWarn) const { ATTR_RANGE_ASSERT; if (!valueAlreadyGotten) { value = getValue(attrEnum); // already trim & upper (by validateAndInsert) TrimNAStringSpace(value); // can't trust that the stored value is canonical } else { TrimNAStringSpace(value); // can't trust that input value is canonical, value.toUpper(); // so here do what validateAndInsert does } DefaultToken tok = DF_noSuchToken; if (value.isNull()) tok = DF_SYSTEM; else { if ((attrEnum == TERMINAL_CHARSET) || (attrEnum == USE_HIVE_SOURCE) || (attrEnum == HIVE_FILE_CHARSET) || (attrEnum == HBASE_DATA_BLOCK_ENCODING_OPTION) || (attrEnum == HBASE_COMPRESSION_OPTION)) return DF_USER; if ( attrEnum == NATIONAL_CHARSET || attrEnum == DEFAULT_CHARSET || attrEnum == HIVE_DEFAULT_CHARSET || attrEnum == ISO_MAPPING || attrEnum == INPUT_CHARSET || attrEnum == TRAF_DEFAULT_COL_CHARSET ) { CharInfo::CharSet cs = CharInfo::getCharSetEnum(value); Int32 err_found = 0; if ( !CharInfo::isCharSetSupported(cs) ) { err_found = 1; } else { switch( attrEnum ) { case NATIONAL_CHARSET: if (cs == CharInfo::KANJI_MP) break; //Allow (for regression test) if ((cs != CharInfo::UNICODE) && (cs != CharInfo::ISO88591)) err_found = 1; break; case DEFAULT_CHARSET: if (cs != CharInfo::ISO88591 && cs != CharInfo::UTF8 // && cs != CharInfo::SJIS ) err_found = 1; break; case HIVE_DEFAULT_CHARSET: case TRAF_DEFAULT_COL_CHARSET: if ((cs != CharInfo::UTF8) && (cs != CharInfo::ISO88591)) err_found = 1; break; case ISO_MAPPING: if (cs != CharInfo::ISO88591) err_found = 1; break; default: break; } } if ( (err_found != 0) && errOrWarn ) *CmpCommon::diags() << DgSqlCode(ERRWARN(3010)) << DgString0(value); else return DF_USER; // kludge, return any valid token } //else //else fall thru to see if value is SYSTEM // OPTIMIZATION_LEVEL if ((attrEnum == OPTIMIZATION_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MINIMUM; case '2': return DF_MEDIUM_LOW; case '3': return DF_MEDIUM; case '4': return DF_MEDIUM; case '5': return DF_MAXIMUM; } // PCODE_OPT_LEVEL if ((attrEnum == PCODE_OPT_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MEDIUM; case '2': return DF_HIGH; case '3': return DF_MAXIMUM; } // HBASE_FILTER_PREDS if ((attrEnum == HBASE_FILTER_PREDS) && value.length()==1) switch (*value.data()){ case '0': return DF_OFF; case '1': return DF_MINIMUM; case '2': return DF_MEDIUM; // in the future add DF_HIGH and DF_MAXIMUM when we implement more // pushdown capabilities } if ( attrEnum == TEMPORARY_TABLE_HASH_PARTITIONS || attrEnum == MVQR_REWRITE_CANDIDATES || attrEnum == MVQR_PUBLISH_TABLE_LOCATION || attrEnum == MVQR_WORKLOAD_ANALYSIS_MV_NAME || attrEnum == HIST_SCRATCH_VOL) return DF_SYSTEM; const char *k = value.data(); char *match = (char*) bsearch( &k, keywords_, DF_lastToken, sizeof(char*), stringCompare); if (match) tok = (DefaultToken) (((const char**) match) - keywords_); else { // Check for synonyms const char *c = value; for (; *c == '0'; c++) ; // all ascii '0' ? 
if (*c == '\0') // terminating nul '\0' tok = DF_OFF; else if (value.length() <= 2) { if (value == "1" || value == "+1" || value == "-1") tok = DF_ON; } else { if ((value == "STOP_AT") || (value == "STOP AT")) tok = DF_STOP; else if (value == "READ COMMITTED") tok = DF_READ_COMMITTED; else if (value == "READ UNCOMMITTED") tok = DF_READ_UNCOMMITTED; else if (value == "REPEATABLE READ") tok = DF_REPEATABLE_READ; else if (value == "BEGINNER") tok = DF_BEGINNER; else if (value == "ADVANCED") tok = DF_ADVANCED; #define CONVERT_SYNONYM(from,to) \ else if (value == "" # from "") { \ CMPASSERT(DF_ ## from == DF_ ## to); \ tok = DF_ ## to; \ } CONVERT_SYNONYM(COMPAQ, TANDEM) CONVERT_SYNONYM(DISABLE, OFF) CONVERT_SYNONYM(ENABLE, SYSTEM) CONVERT_SYNONYM(FALSE, OFF) CONVERT_SYNONYM(FULL, MAXIMUM) CONVERT_SYNONYM(TRUE, ON) } } } NABoolean isValid = FALSE; if (tok != DF_noSuchToken) switch (attrEnum) { case DEFAULT_SCHEMA_ACCESS_ONLY: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case DEFAULT_SCHEMA_NAMETYPE: if (tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case DETAILED_STATISTICS: if (tok == DF_ALL || tok == DF_ACCUMULATED || tok == DF_OPERATOR || tok == DF_PERTABLE || tok == DF_OFF) isValid = TRUE; break; case HIDE_INDEXES: if (tok == DF_NONE || tok == DF_ALL || tok == DF_VERTICAL || tok == DF_INDEXES || tok == DF_KEYINDEXES) isValid = TRUE; break; case HIVE_USE_EXT_TABLE_ATTRS: if (tok == DF_ALL || tok == DF_OFF || tok == DF_ON ) isValid = TRUE; break; case INDEX_ELIMINATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM || tok == DF_MAXIMUM || tok == DF_AGGRESSIVE ) isValid = TRUE; break; case IF_LOCKED: if (tok == DF_RETURN || tok == DF_WAIT) isValid = TRUE; break; case INSERT_VSBB: if (tok == DF_OFF || tok == DF_LOADNODUP || tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case OVERFLOW_MODE: if (tok == DF_DISK || tok == DF_SSD || tok == DF_MMAP) isValid = TRUE; break; case SORT_ALGO: if(tok == DF_HEAP || tok == DF_IQS || tok == DF_REPSEL || tok == DF_QS) isValid = TRUE; break; case QUERY_TEMPLATE_CACHE: case SHARE_TEMPLATE_CACHED_PLANS: case VSBB_TEST_MODE: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case QUERY_TEXT_CACHE: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM || tok == DF_SKIP) isValid = TRUE; break; case DISABLE_BUFFERED_INSERTS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case ISOLATION_LEVEL: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case ISOLATION_LEVEL_FOR_UPDATES: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case MVGROUP_AUTOMATIC_CREATION: case MV_TRACE_INCONSISTENCY: //++ MV case MV_AS_ROW_TRIGGER: //++ MV { if(DF_ON == tok || DF_OFF == tok) { isValid = TRUE; } } break; case IUD_NONAUDITED_INDEX_MAINT: if (tok == DF_OFF || tok == DF_SYSTEM || tok == DF_WARN || tok == DF_ON) isValid = TRUE; break; case HIVE_SCAN_SPECIAL_MODE: isValid = TRUE; break; case IS_SQLCI: // for primary mxcmp that is invoked for user queries, the only valid // value for mxci_process cqd is TRUE. This cqd is set once by mxci // at startup time and cannot be changed by user. That way we know that // a request has come in from mxci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. 
if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case NVCI_PROCESS: // for primary mxcmp that is invoked for user queries, the only valid // value for nvci_process cqd is TRUE. This cqd is set once by nvci // at startup time and cannot be changed by user. That way we know that // a request has come in from nvci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case NAMETYPE: if (tok == DF_ANSI || tok == DF_SHORTANSI) isValid = TRUE; break; case OPTIMIZATION_GOAL: if (tok == DF_FIRSTROW || tok == DF_LASTROW || tok == DF_RESOURCES) isValid = TRUE; break; case USER_EXPERIENCE_LEVEL: if (tok == DF_ADVANCED || tok == DF_BEGINNER) isValid = TRUE; break; case PCODE_OPT_LEVEL: if (tok == DF_OFF) { isValid = TRUE; break; } // else fall through to the next case, all those keywords are allowed // as well case ATTEMPT_ESP_PARALLELISM: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case OPTIMIZATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM_LOW || tok == DF_MEDIUM || tok == DF_MAXIMUM) isValid = TRUE; break; case HBASE_FILTER_PREDS: if(tok == DF_OFF || tok == DF_ON) { if (tok == DF_ON) tok = DF_MINIMUM; // to keep backward compatibility isValid= TRUE; } break; case ROBUST_QUERY_OPTIMIZATION: if (tok == DF_MINIMUM || tok == DF_SYSTEM || tok == DF_MAXIMUM || tok == DF_HIGH) isValid = TRUE; break; case REFERENCE_CODE: case TARGET_CODE: if (tok == DF_RELEASE || tok == DF_DEBUG) isValid = TRUE; break; /* case ROLLBACK_ON_ERROR: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; */ case AUTO_QUERY_RETRY: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM) isValid = TRUE; break; case AUTO_QUERY_RETRY_WARNINGS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXE_PARALLEL_DDL: if (tok == DF_OFF || tok == DF_ON || tok == DF_EXTERNAL || tok == DF_INTERNAL) isValid = TRUE; break; case UNAVAILABLE_PARTITION: if (tok == DF_SKIP || tok == DF_STOP) isValid = TRUE; break; case QUERY_CACHE_STATISTICS: // on, off are no-ops if (tok == DF_PRINT || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case QUERY_CACHE_STATEMENT_PINNING: if (tok == DF_CLEAR || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case HJ_TYPE: if (tok == DF_ORDERED || tok == DF_HYBRID || tok == DF_SYSTEM) isValid = TRUE; break; case REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; case POS: if (tok == DF_LOCAL_NODE || tok == DF_OFF || tok == DF_MULTI_NODE || tok == DF_DISK_POOL) isValid = TRUE; break; case USTAT_INTERNAL_SORT: if (tok == DF_ON || tok == DF_OFF || tok == DF_HYBRID) isValid = TRUE; break; case USTAT_AUTO_FOR_VOLATILE_TABLES: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case SUBQUERY_UNNESTING: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG) isValid = TRUE; break; case SUBQUERY_UNNESTING_P2: if (tok == DF_OFF || tok == DF_ON || tok == DF_INTERNAL) isValid = TRUE; break; case SORT_INTERMEDIATE_SCRATCH_CLEANUP: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case SORT_MEMORY_QUOTA_SYSTEM: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; /* If MDAM_SCAN_METHOD's value is "MAXIMUM" only, Right side of Nested Join 
will use the MDAM path Allowable values for MDAM_SCAN_METHOD are 'ON' | 'OFF' | 'MAXIMUM' */ case MDAM_SCAN_METHOD: if (tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case SHOWDDL_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_LOG) isValid = TRUE; break; case SHOWDDL_DISPLAY_PRIVILEGE_GRANTS: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXPLAIN_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_EXTERNAL_DETAILED) isValid = TRUE; break; case UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY: if (tok == DF_ON || tok == DF_OFF || tok == DF_AGGRESSIVE) isValid = TRUE; break; case MVQR_ALL_JBBS_IN_QD: case MVQR_REWRITE_ENABLED_OPTION: case MVQR_REWRITE_SINGLE_TABLE_QUERIES: case MVQR_USE_EXTRA_HUB_TABLES: case MVQR_ENABLE_LOGGING: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case MVQR_LOG_QUERY_DESCRIPTORS: if (tok == DF_OFF || tok == DF_DUMP || tok == DF_DUMP_MV || tok == DF_LOG) isValid = TRUE; break; case MVQR_PRIVATE_QMS_INIT: if (tok == DF_SMD || tok == DF_XML || tok == DF_NONE) isValid = TRUE; break; case MVQR_PUBLISH_TO: if (tok == DF_PUBLIC || tok == DF_PRIVATE || tok == DF_BOTH || tok == DF_NONE) isValid = TRUE; break; case MVQR_WORKLOAD_ANALYSIS_MV_NAME: isValid = TRUE; break; case ELIMINATE_REDUNDANT_JOINS: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG || tok == DF_MINIMUM) isValid = TRUE; break; case VOLATILE_TABLE_FIND_SUITABLE_KEY: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case CAT_DISTRIBUTE_METADATA: if (tok == DF_OFF || tok == DF_LOCAL_NODE || tok == DF_ON) isValid = TRUE; break; case MV_DUMP_DEBUG_INFO: if (tok == DF_OFF || tok == DF_ON) isValid = TRUE; break; case RANGESPEC_TRANSFORMATION: if (tok == DF_OFF || tok == DF_ON || tok == DF_MINIMUM) isValid = TRUE; break; case ASYMMETRIC_JOIN_TRANSFORMATION: if (tok == DF_MINIMUM || tok == DF_MAXIMUM) isValid = TRUE; break; case CAT_DEFAULT_COMPRESSION: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; case REPLICATE_DISK_POOL: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; // The DF_SAMPLE setting indicates that the persistent sample will be // updated incrementally, but not the histograms; they will be created // anew from the incrementally updated sample. 
case USTAT_INCREMENTAL_UPDATE_STATISTICS: if (tok == DF_OFF || tok == DF_SAMPLE || tok == DF_ON) isValid = TRUE; break; case REPLICATE_COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE || tok == DF_SOURCE || tok == DF_SYSTEM) isValid = TRUE; break; case REUSE_OPENS: if (tok==DF_ON || tok == DF_OFF || tok == DF_OPENS_FOR_WRITE) isValid = TRUE; break; case USE_HIVE_SOURCE: isValid = TRUE; break; case TRAF_SIMILARITY_CHECK: if (tok == DF_ROOT || tok == DF_LEAF || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case TRAF_TABLE_SNAPSHOT_SCAN: if (tok == DF_NONE || tok == DF_SUFFIX || tok == DF_LATEST) isValid = TRUE; break; case LOB_OUTPUT_SIZE: if (tok >=0 && tok <= 512000) isValid = TRUE; break; case LOB_MAX_CHUNK_MEM_SIZE: if (tok >=0 && tok <= 512000) isValid = TRUE; break; case LOB_GC_LIMIT_SIZE: if (tok >= 0 ) isValid=TRUE; case TRAF_TRANS_TYPE: if (tok == DF_MVCC || tok == DF_SSCC) isValid = TRUE; break; case HBASE_RANGE_PARTITIONING_PARTIAL_COLS: if (tok == DF_OFF || tok == DF_MINIMUM || tok == DF_MEDIUM || tok == DF_MAXIMUM || tok == DF_ON) isValid = TRUE; break; case TRAF_UPSERT_MODE: if (tok == DF_MERGE || tok == DF_REPLACE || tok == DF_OPTIMAL) isValid = TRUE; break; // Nothing needs to be added here for ON/OFF/SYSTEM keywords -- // instead, add to DEFAULT_ALLOWS_SEPARATE_SYSTEM code in the ctor. default: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; } // See "NOTE 2" way up top. if (!isValid) { if (tok == DF_SYSTEM) { isValid = isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM); if (!isValid) { NAString tmp(getDefaultDefaultValue(attrEnum)); isValid = isSynonymOfSYSTEM(attrEnum, tmp); } } } if (!isValid) { tok = DF_noSuchToken; if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } return tok; } DefaultToken NADefaults::getToken( const Int32 attrEnum, const Int32 errOrWarn ) const { // Check the cache first. if ( currentTokens_[attrEnum] != NULL ) { return *currentTokens_[attrEnum]; } // Get the token and allocate memory to store the token value. NAString tmp( NADHEAP ); currentTokens_[attrEnum] = new NADHEAP DefaultToken; *currentTokens_[attrEnum] = token( attrEnum, tmp, FALSE, errOrWarn ); return *currentTokens_[attrEnum]; } NABoolean NADefaults::getIsolationLevel(TransMode::IsolationLevel &arg, DefaultToken tok) const { NABoolean specifiedOK = TRUE; if (tok == DF_noSuchToken) tok = getToken(ISOLATION_LEVEL); switch (tok) { case DF_READ_COMMITTED: arg = TransMode::READ_COMMITTED_; break; case DF_READ_UNCOMMITTED: arg = TransMode::READ_UNCOMMITTED_; break; case DF_REPEATABLE_READ: arg = TransMode::REPEATABLE_READ_; break; case DF_SERIALIZABLE: case DF_SYSTEM: arg = TransMode::SERIALIZABLE_; break; case DF_NONE: arg = TransMode::IL_NOT_SPECIFIED_; break; default: arg = TransMode::SERIALIZABLE_; specifiedOK = FALSE; NAString value(NADHEAP); if (tok != DF_noSuchToken) value = keyword(tok); *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1("ISOLATION_LEVEL"); } return specifiedOK; } // find the packed length for all the default values stored // in currentDefaults_ array. // currentDefaults_ is a fixed sized array of "char *" where each // entry is pointing to the default value for that default. // After pack, the default values are put in the buffer in // sequential order with a null terminator. 
Lng32 NADefaults::packedLengthDefaults() { Lng32 size = 0; const size_t numAttrs = numDefaultAttributes(); for (size_t i = 0; i < numAttrs; i++) { size += strlen(currentDefaults_[i]) + 1; } return size; } Lng32 NADefaults::packDefaultsToBuffer(char * buffer) { const size_t numAttrs = numDefaultAttributes(); Lng32 totalSize = 0; Lng32 size = 0; for (UInt32 i = 0; i < numAttrs; i++) { size = (Lng32)strlen(currentDefaults_[i]) + 1; strcpy(buffer, currentDefaults_[i]); buffer += size; totalSize += size; } return totalSize; } Lng32 NADefaults::unpackDefaultsFromBuffer(Lng32 numEntriesInBuffer, char * buffer) { return 0; } NABoolean NADefaults::isSameCQD(Lng32 numEntriesInBuffer, char * buffer, Lng32 bufLen) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // check to see if the default values in 'buffer' are the same // as those in the currentDefaults_ array. // Return TRUE if they are all the same. if (numCurrentDefaultAttrs != numEntriesInBuffer) return FALSE; if (bufLen == 0) return FALSE; Int32 curPos = 0; for (Int32 i = 0; i < numEntriesInBuffer; i++) { if (strcmp(currentDefaults_[i], &buffer[curPos]) != 0) return FALSE; curPos += strlen(&buffer[curPos]) + 1; } // everything matches. return TRUE; } Lng32 NADefaults::createNewDefaults(Lng32 numEntriesInBuffer, char * buffer) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // save the current defaults savedCurrentDefaults_ = currentDefaults_; savedCurrentFloats_ = currentFloats_; savedCurrentTokens_ = currentTokens_; // VO, Plan Versioning Support. // // This code may execute in a downrev compiler, which knows about fewer // defaults than the compiler originally used to compile the statement. // Only copy those defaults we know about, and skip the rest. Lng32 numEntriesToCopy = _min (numEntriesInBuffer, numCurrentDefaultAttrs); // allocate a new currentDefaults_ array and make it point to // the default values in the input 'buffer'. // If the current number of default attributes are greater than the // ones in the input buffer, then populate the remaining default // entries in the currentDefaults_ array with the values from the // the savedCurrentDefaults_. currentDefaults_ = new NADHEAP const char * [numCurrentDefaultAttrs]; Int32 curPos = 0; Int32 i = 0; for (i = 0; i < numEntriesToCopy; i++) { currentDefaults_[i] = &buffer[curPos]; curPos += strlen(&buffer[curPos]) + 1; } for (i = numEntriesToCopy; i < numCurrentDefaultAttrs; i++) { currentDefaults_[i] = savedCurrentDefaults_[i]; } // allocate two empty arrays for floats and tokens. currentFloats_ = new NADHEAP float * [numCurrentDefaultAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numCurrentDefaultAttrs]; memset( currentFloats_, 0, sizeof(float *) * numCurrentDefaultAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numCurrentDefaultAttrs ); return 0; } Lng32 NADefaults::restoreDefaults(Lng32 numEntriesInBuffer, char * buffer) { // Deallocate the currentDefaults_ array. // The array entries are not to be deleted as they point to // entries in 'buffer' or the 'savedCurrentDefaults_'. // See NADefaults::createNewDefaults() method. 
if (currentDefaults_) { NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } // restore the saved defaults currentDefaults_ = savedCurrentDefaults_; currentFloats_ = savedCurrentFloats_; currentTokens_ = savedCurrentTokens_; return 0; } void NADefaults::updateCurrentDefaultsForOSIM(DefaultDefault * defaultDefault, NABoolean validateFloatVal) { Int32 attrEnum = defaultDefault->attrEnum; const char * defaultVal = defaultDefault->value; const char * valueStr = currentDefaults_[attrEnum]; if(valueStr) { NADELETEBASIC(valueStr,NADHEAP); } char * value = new NADHEAP char[strlen(defaultVal) + 1]; strcpy(value, defaultVal); currentDefaults_[attrEnum] = value; if ( validateFloatVal ) { float floatVal = 0; if (validateFloat(currentDefaults_[attrEnum], floatVal, attrEnum)) { if (currentFloats_[attrEnum]) { NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); } currentFloats_[attrEnum] = new NADHEAP float; *currentFloats_[attrEnum] = floatVal; } } if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } } void NADefaults::setSchemaAsLdapUser(const NAString val) { NAString ldapUsername = val; if ( ldapUsername.isNull() ) ldapUsername = getValue(LDAP_USERNAME); if ( ldapUsername.isNull() ) return; ldapUsername.toUpper(); NAString schName = '"'; schName += ldapUsername; schName += '"'; // check schema name before insert // may get special characters from ldap ComSchemaName cSchName(schName); if ( !cSchName.getSchemaNamePart().isEmpty() && cSchName.getCatalogNamePart().isEmpty()) // should have no catalog { insert(SCHEMA, schName); } else { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(schName) << DgString1("SCHEMA"); } }
1
18,115
A default value of 100 MB may be too small. I understand that this is good for mixed workloads, but do consider that plans with Unpack, especially when used for insert/upsert, are simple. Unpack is always serial and part of the master executor, and often there is only one in a query. The cost of having a low value here seems to be that the queue length from Unpack could be limited by it when we are loading a table with wide rows (even if the actual data in the row is not wide). We know from performance runs that queue length is critical to upsert load performance. This is a hunch on my part and could be misguided. It would be good to run a performance test for upsert into a table with wide rows, if you think that is worthwhile.
apache-trafodion
cpp
@@ -32,8 +32,8 @@ type ( GetByOwner(address.Address) *Candidate GetBySelfStakingIndex(uint64) *Candidate Upsert(*Candidate) error - CreditBucketPool(*big.Int, bool) error - DebitBucketPool(*big.Int, bool, bool) error + CreditBucketPool(*big.Int) error + DebitBucketPool(*big.Int, bool) error Commit() error }
1
// Copyright (c) 2020 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package staking import ( "math/big" "github.com/pkg/errors" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action/protocol" ) type ( // CandidateStateManager is candidate state manager on top of StateManager CandidateStateManager interface { protocol.StateManager CandCenter() CandidateCenter BucketPool() *BucketPool // candidate and bucket pool related Size() int ContainsName(string) bool ContainsOwner(address.Address) bool ContainsOperator(address.Address) bool ContainsSelfStakingBucket(uint64) bool GetByName(string) *Candidate GetByOwner(address.Address) *Candidate GetBySelfStakingIndex(uint64) *Candidate Upsert(*Candidate) error CreditBucketPool(*big.Int, bool) error DebitBucketPool(*big.Int, bool, bool) error Commit() error } candSM struct { protocol.StateManager candCenter *candCenter bucketPool *BucketPool } ) // NewCandidateStateManager returns a new CandidateStateManager instance func NewCandidateStateManager(sm protocol.StateManager) (CandidateStateManager, error) { // TODO: we can store csm in a local cache, just as how statedb store the workingset // b/c most time the sm is used before, no need to create another clone csr, err := ConstructBaseView(sm) if err != nil { return nil, err } // make a copy of candidate center and bucket pool, so they can be modified by csm // and won't affect base view until being committed csm := &candSM{ StateManager: sm, // TODO: remove CandidateCenter interface, no need for (*candCenter) candCenter: csr.CandCenter().Base().(*candCenter), bucketPool: csr.BucketPool().Clone(), } // extract view change from SM if err := csm.bucketPool.SyncPool(sm); err != nil { return nil, err } // TODO: remove CandidateCenter interface, convert the code below to candCenter.SyncCenter() ser, err := protocol.UnloadAndAssertBytes(sm, protocolID) switch errors.Cause(err) { case protocol.ErrTypeAssertion: return nil, errors.Wrap(err, "failed to create CandidateStateManager") case protocol.ErrNoName: return csm, nil } delta := CandidateList{} if err := delta.Deserialize(ser); err != nil { return nil, err } // apply delta to the center if err := csm.candCenter.SetDelta(delta); err != nil { return nil, err } return csm, nil } func (csm *candSM) CandCenter() CandidateCenter { return csm.candCenter } func (csm *candSM) BucketPool() *BucketPool { return csm.bucketPool } func (csm *candSM) Size() int { return csm.candCenter.Size() } func (csm *candSM) ContainsName(name string) bool { return csm.candCenter.ContainsName(name) } func (csm *candSM) ContainsOwner(addr address.Address) bool { return csm.candCenter.ContainsOwner(addr) } func (csm *candSM) ContainsOperator(addr address.Address) bool { return csm.candCenter.ContainsOperator(addr) } func (csm *candSM) ContainsSelfStakingBucket(index uint64) bool { return csm.candCenter.ContainsSelfStakingBucket(index) } func (csm *candSM) GetByName(name string) *Candidate { return csm.candCenter.GetByName(name) } func (csm *candSM) GetByOwner(addr address.Address) *Candidate { return csm.candCenter.GetByOwner(addr) } func (csm *candSM) GetBySelfStakingIndex(index 
uint64) *Candidate { return csm.candCenter.GetBySelfStakingIndex(index) } // Upsert writes the candidate into state manager and cand center func (csm *candSM) Upsert(d *Candidate) error { if err := csm.candCenter.Upsert(d); err != nil { return err } if err := putCandidate(csm.StateManager, d); err != nil { return err } delta := csm.candCenter.Delta() if len(delta) == 0 { return nil } ser, err := delta.Serialize() if err != nil { return err } // load change to sm csm.StateManager.Load(protocolID, ser) return nil } func (csm *candSM) CreditBucketPool(amount *big.Int, create bool) error { return csm.bucketPool.CreditPool(csm.StateManager, amount, create) } func (csm *candSM) DebitBucketPool(amount *big.Int, newBucket, create bool) error { return csm.bucketPool.DebitPool(csm.StateManager, amount, newBucket, create) } func (csm *candSM) Commit() error { if err := csm.candCenter.Commit(); err != nil { return err } if err := csm.bucketPool.Commit(csm.StateManager); err != nil { return err } // write update view back to state factory return csm.WriteView(protocolID, ConvertToViewData(csm)) }
1
22,186
I think we should pass in ctx, and use ctx.GreenlandHeight inside bucketPool to determine whether or not to create (see the sketch after this record).
iotexproject-iotex-core
go
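To illustrate the suggestion in the record above, here is a minimal, hypothetical Go sketch of what passing the context into the bucket pool could look like. All names used here (blockCtx, blockCtxKey, GreenlandHeight, bucketPool.CreditPool) are assumptions for illustration only and do not reflect the actual iotex-core API; the point is simply that the create/no-create decision can be derived from the block height carried in ctx rather than from an extra boolean argument.

package main

import (
	"context"
	"fmt"
	"math/big"
)

// blockCtxKey and blockCtx are stand-ins for however the block context is
// carried in iotex-core (e.g. via a protocol helper); they are assumptions.
type blockCtxKey struct{}

type blockCtx struct {
	BlockHeight     uint64
	GreenlandHeight uint64 // assumed activation height of the "Greenland" upgrade
}

// bucketPool is a simplified stand-in for the real BucketPool type.
type bucketPool struct {
	total *big.Int
}

// CreditPool derives the "create the pool account or not" decision from the
// height in ctx instead of taking an explicit boolean argument.
func (bp *bucketPool) CreditPool(ctx context.Context, amount *big.Int) error {
	blk, ok := ctx.Value(blockCtxKey{}).(blockCtx)
	if !ok {
		return fmt.Errorf("block context missing from ctx")
	}
	if bp.total == nil {
		if blk.BlockHeight < blk.GreenlandHeight {
			return fmt.Errorf("bucket pool account not available before the upgrade height")
		}
		// Lazily create the pool account once the upgrade height is reached.
		bp.total = new(big.Int)
	}
	bp.total.Add(bp.total, amount)
	return nil
}

func main() {
	ctx := context.WithValue(context.Background(), blockCtxKey{},
		blockCtx{BlockHeight: 1000000, GreenlandHeight: 900000})
	pool := &bucketPool{}
	if err := pool.CreditPool(ctx, big.NewInt(42)); err != nil {
		fmt.Println("credit failed:", err)
		return
	}
	fmt.Println("pool total after credit:", pool.total) // prints 42
}

Under a shape like this, callers would no longer need to thread a create flag through the CandidateStateManager interface, which is consistent with the direction of the diff shown in that record.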
@@ -153,7 +153,7 @@ public class I18nTest extends JUnit4TestBase { assertThat(ime.isActivated()).isTrue(); assertThat(ime.getActiveEngine()).isEqualTo(desiredEngine); - // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. + // Send the Romaji for "Tokyo". The space at the end instructs the IME to transform the word. input.sendKeys("toukyou "); input.sendKeys(Keys.ENTER);
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; import static org.openqa.selenium.testing.drivers.Browser.CHROME; import static org.openqa.selenium.testing.drivers.Browser.FIREFOX; import static org.openqa.selenium.testing.drivers.Browser.HTMLUNIT; import static org.openqa.selenium.testing.drivers.Browser.IE; import static org.openqa.selenium.testing.drivers.Browser.MARIONETTE; import org.junit.Test; import org.openqa.selenium.environment.GlobalTestEnvironment; import org.openqa.selenium.testing.Ignore; import org.openqa.selenium.testing.JUnit4TestBase; import org.openqa.selenium.testing.NotYetImplemented; import org.openqa.selenium.testing.TestUtilities; import java.util.List; public class I18nTest extends JUnit4TestBase { /** * The Hebrew word shalom (peace) encoded in order Shin (sh) Lamed (L) Vav (O) final-Mem (M). */ private static final String shalom = "\u05E9\u05DC\u05D5\u05DD"; /** * The Hebrew word tmunot (images) encoded in order Taf (t) Mem (m) Vav (u) Nun (n) Vav (o) Taf * (t). 
*/ private static final String tmunot = "\u05EA\u05DE\u05D5\u05E0\u05D5\u05EA"; /** * Japanese for "Tokyo" */ private static final String tokyo = "\u6771\u4EAC"; /** * Chinese for "The Voice of China" */ private static final String theVoiceOfChina = "\u4E2D\u56FD\u4E4B\u58F0"; @Test public void testCn() { driver.get(pages.chinesePage); driver.findElement(By.linkText(theVoiceOfChina)).click(); } @Test public void testEnteringHebrewTextFromLeftToRight() { driver.get(pages.chinesePage); WebElement input = driver.findElement(By.name("i18n")); input.sendKeys(shalom); assertThat(input.getAttribute("value")).isEqualTo(shalom); } @Test public void testEnteringHebrewTextFromRightToLeft() { driver.get(pages.chinesePage); WebElement input = driver.findElement(By.name("i18n")); input.sendKeys(tmunot); assertThat(input.getAttribute("value")).isEqualTo(tmunot); } @Test @Ignore(value = CHROME, reason = "ChromeDriver only supports characters in the BMP") public void testEnteringSupplementaryCharacters() { assumeFalse("IE: versions less thank 10 have issue 5069", TestUtilities.isInternetExplorer(driver) && TestUtilities.getIEVersion(driver) < 10); driver.get(pages.chinesePage); String input = ""; input += new String(Character.toChars(0x20000)); input += new String(Character.toChars(0x2070E)); input += new String(Character.toChars(0x2000B)); input += new String(Character.toChars(0x2A190)); input += new String(Character.toChars(0x2A6B2)); WebElement el = driver.findElement(By.name("i18n")); el.sendKeys(input); assertThat(el.getAttribute("value")).isEqualTo(input); } @Test public void testShouldBeAbleToReturnTheTextInAPage() { String url = GlobalTestEnvironment.get() .getAppServer() .whereIs("encoding"); driver.get(url); String text = driver.findElement(By.tagName("body")).getText(); assertThat(text).isEqualTo(shalom); } @Test @Ignore(IE) @Ignore(CHROME) @Ignore(FIREFOX) @Ignore(MARIONETTE) @NotYetImplemented(HTMLUNIT) public void testShouldBeAbleToActivateIMEEngine() throws InterruptedException { assumeTrue("IME is supported on Linux only.", TestUtilities.getEffectivePlatform().is(Platform.LINUX)); driver.get(pages.formPage); WebElement input = driver.findElement(By.id("working")); // Activate IME. By default, this keycode activates IBus input for Japanese. WebDriver.ImeHandler ime = driver.manage().ime(); List<String> engines = ime.getAvailableEngines(); String desiredEngine = "anthy"; if (!engines.contains(desiredEngine)) { System.out.println("Desired engine " + desiredEngine + " not available, skipping test."); return; } ime.activateEngine(desiredEngine); int totalWaits = 0; while (!ime.isActivated() && (totalWaits < 10)) { Thread.sleep(500); totalWaits++; } assertThat(ime.isActivated()).isTrue(); assertThat(ime.getActiveEngine()).isEqualTo(desiredEngine); // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. input.sendKeys("toukyou "); input.sendKeys(Keys.ENTER); String elementValue = input.getAttribute("value"); ime.deactivate(); assertThat(ime.isActivated()).isFalse(); // IME is not present. Don't fail because of that. But it should have the Romaji value // instead. 
assertThat(elementValue) .describedAs("The elemnt's value should either remain in Romaji or be converted properly.") .isEqualTo(tokyo); } @Test @Ignore(IE) @Ignore(CHROME) @Ignore(FIREFOX) public void testShouldBeAbleToInputJapanese() { assumeTrue("IME is supported on Linux only.", TestUtilities.getEffectivePlatform().is(Platform.LINUX)); driver.get(pages.formPage); WebElement input = driver.findElement(By.id("working")); // Activate IME. By default, this keycode activates IBus input for Japanese. input.sendKeys(Keys.ZENKAKU_HANKAKU); // Send the Romaji for "Tokyo". The space at the end instructs the IME to convert the word. input.sendKeys("toukyou "); String elementValue = input.getAttribute("value"); // Turn OFF IME input first. input.sendKeys(Keys.ZENKAKU_HANKAKU); // IME is not present. Don't fail because of that. But it should have the Romaji value // instead. assertThat(elementValue) .describedAs("The element's value should either remain in Romaji or be converted properly.") .isIn(tokyo, "\uE040" + "toukyou ", "toukyou "); } }
1
16,514
A correct but unrelated change. We'll slide this one in without another word ;)
SeleniumHQ-selenium
rb
@@ -1,5 +1,4 @@ <%= semantic_form_for checkout, url: checkouts_path(checkout.plan), html: { method: 'post' } do |form| %> - <%= form.semantic_errors %> <%= form.inputs do %> <% if signed_out? %>
1
<%= semantic_form_for checkout, url: checkouts_path(checkout.plan), html: { method: 'post' } do |form| %> <%= form.semantic_errors %> <%= form.inputs do %> <% if signed_out? %> <ul class="checkout-sigin-signup-toggle"> <li class="video_tutorial-alert sign-in-prompt"> <%= link_to "Already have an account? Sign in", sign_in_path(return_to: request.fullpath) %> </li> <li><%= link_to "Sign up with GitHub", github_auth_path %></li> </ul> <% end %> <%= hidden_field_tag "coupon_id" %> <% if signed_out? %> <%= form.input :name, required: true %> <%= form.input :email, as: :email, required: true %> <%= form.input :password, required: true %> <% end %> <% if checkout.needs_github? %> <%= form.input :github_username, required: true, label: "GitHub username", hint: "Be sure to enter a valid, unique GitHub username. Organizations are not allowed." %> <% end %> <% end %> <%= link_to 'Need an address on your receipt?', '#', class: 'reveal-address' %> <%= form.inputs class: 'address-info' do %> <%= render 'users/address_fields', form: form %> <% end %> <%= form.inputs id: 'billing-information' do %> <h3>Secure Credit Card Payment</h3> <li id="credit-card-icons"> <%= image_tag "icons/visa.png" %> <%= image_tag "icons/master.png" %> <%= image_tag "icons/american_express.png" %> <%= image_tag "icons/discover.png" %> </li> <li class="payment-errors"></li> <li id="checkout_cc_input" class="stripe"> <label for='card-number'>Card Number</label> <input type='text' size='20' autocomplete='off' id='card-number' class='card-number'/> </li> <li id="checkout_expiration_input" class="stripe"> <label>Expiration</label> <%= select_month nil, { prompt: 'Month', add_month_numbers: true }, class: 'card-expiry-month' %> <%= select_year nil, { prompt: 'Year', start_year: Time.zone.today.year, end_year: 10.years.from_now.year }, class: 'card-expiry-year' %> </li> <li id="checkout_cvc_input" class="stripe"> <label for='card-cvc'>CVC</label> <input type='text' size='4' autocomplete='off' id='card-cvc' class='card-cvc'/> </li> <% end %> <%= form.actions do %> <%= form.action :submit, label: "Submit Payment &mdash; #{submit_amount(checkout)}".html_safe %> <% end %> <% end %> <%= render partial: 'shared/stripe' %>
1
14,600
Why remove this?
thoughtbot-upcase
rb
@@ -11,7 +11,7 @@ use Shopsys\FrameworkBundle\Component\Router\Security\RouteCsrfProtector; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\RequestStack; use Symfony\Component\Routing\Router; -use Twig_Environment; +use Twig\Environment; class GridTest extends TestCase {
1
<?php namespace Tests\FrameworkBundle\Unit\Component\Grid; use PHPUnit\Framework\TestCase; use Shopsys\FrameworkBundle\Component\Grid\DataSourceInterface; use Shopsys\FrameworkBundle\Component\Grid\Grid; use Shopsys\FrameworkBundle\Component\Grid\GridView; use Shopsys\FrameworkBundle\Component\Paginator\PaginationResult; use Shopsys\FrameworkBundle\Component\Router\Security\RouteCsrfProtector; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\RequestStack; use Symfony\Component\Routing\Router; use Twig_Environment; class GridTest extends TestCase { public function testGetParametersFromRequest() { $getParameters = [ Grid::GET_PARAMETER => [ 'gridId' => [ 'limit' => '100', 'page' => '3', 'order' => '-name', ], ], ]; $request = new Request($getParameters); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $this->assertSame('gridId', $grid->getId()); $this->assertSame(100, $grid->getLimit()); $this->assertSame(3, $grid->getPage()); $this->assertSame('name', $grid->getOrderSourceColumnName()); $this->assertSame('desc', $grid->getOrderDirection()); } public function testAddColumn() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->addColumn('columnId1', 'sourceColumnName1', 'title1', true)->setClassAttribute('classAttribute'); $grid->addColumn('columnId2', 'sourceColumnName2', 'title2', false); $columns = $grid->getColumnsById(); $this->assertCount(2, $columns); $column2 = array_pop($columns); /* @var $column2 \Shopsys\FrameworkBundle\Component\Grid\Column */ $column1 = array_pop($columns); /* @var $column1 \Shopsys\FrameworkBundle\Component\Grid\Column */ $this->assertSame('columnId1', $column1->getId()); $this->assertSame('sourceColumnName1', $column1->getSourceColumnName()); $this->assertSame('title1', $column1->getTitle()); $this->assertSame(true, $column1->isSortable()); $this->assertSame('classAttribute', $column1->getClassAttribute()); $this->assertSame('columnId2', $column2->getId()); $this->assertSame('sourceColumnName2', $column2->getSourceColumnName()); $this->assertSame('title2', $column2->getTitle()); $this->assertSame(false, $column2->isSortable()); $this->assertSame('', $column2->getClassAttribute()); } public function testAddColumnDuplicateId() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->addColumn('columnId1', 'sourceColumnName1', 'title1'); 
$this->expectException(\Shopsys\FrameworkBundle\Component\Grid\Exception\DuplicateColumnIdException::class); $grid->addColumn('columnId1', 'sourceColumnName2', 'title2'); } public function testEnablePaging() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->enablePaging(); $this->assertTrue($grid->isEnabledPaging()); } public function testEnablePagingDefaultDisable() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $this->assertFalse($grid->isEnabledPaging()); } public function testSetDefaultOrder() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->setDefaultOrder('columnId1', DataSourceInterface::ORDER_DESC); $this->assertSame('-columnId1', $grid->getOrderSourceColumnNameWithDirection()); $grid->setDefaultOrder('columnId2', DataSourceInterface::ORDER_ASC); $this->assertSame('columnId2', $grid->getOrderSourceColumnNameWithDirection()); } public function testSetDefaultOrderWithRequest() { $getParameters = [ Grid::GET_PARAMETER => [ 'gridId' => [ 'order' => '-request', ], ], ]; $request = new Request($getParameters); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->setDefaultOrder('default', DataSourceInterface::ORDER_ASC); $this->assertSame('-request', $grid->getOrderSourceColumnNameWithDirection()); } public function testCreateView() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->getMockBuilder(DataSourceInterface::class) ->setMethods(['getTotalRowsCount', 'getPaginatedRows']) ->getMockForAbstractClass(); $dataSourceMock->expects($this->never())->method('getTotalRowsCount'); $dataSourceMock->expects($this->once())->method('getPaginatedRows') ->willReturn(new PaginationResult(1, 1, 0, [])); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, 
$twigMock ); $gridView = $grid->createView(); $this->assertInstanceOf(GridView::class, $gridView); } public function testCreateViewWithPaging() { $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->getMockBuilder(DataSourceInterface::class) ->setMethods(['getTotalRowsCount', 'getPaginatedRows']) ->getMockForAbstractClass(); $dataSourceMock->expects($this->once())->method('getTotalRowsCount')->willReturn(0); $dataSourceMock->expects($this->once())->method('getPaginatedRows') ->willReturn(new PaginationResult(1, 1, 0, [])); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $grid->enablePaging(); $gridView = $grid->createView(); $this->assertInstanceOf(GridView::class, $gridView); } public function testEnableDragAndDrop() { $entityClass = 'Path\To\Entity\Class'; $request = new Request(); $requestStack = new RequestStack(); $requestStack->push($request); $twigMock = $this->createMock(Twig_Environment::class); $routerMock = $this->createMock(Router::class); $routeCsrfProtectorMock = $this->createMock(RouteCsrfProtector::class); $dataSourceMock = $this->createMock(DataSourceInterface::class); $grid = new Grid( 'gridId', $dataSourceMock, $requestStack, $routerMock, $routeCsrfProtectorMock, $twigMock ); $this->assertFalse($grid->isDragAndDrop()); $grid->enableDragAndDrop($entityClass); $this->assertTrue($grid->isDragAndDrop()); } }
1
21,579
I still see some usages of the non-namespaced variants (search for Twig_ in the project, about 42 matches). Is that intentional?
shopsys-shopsys
php
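To make the reviewer's point above concrete, here is a minimal, hypothetical sketch of the namespaced cleanup applied to one of the grid test mocks. It assumes the installed Twig version ships the namespaced `Twig\Environment` class (the legacy `Twig_Environment` name is only a class alias for it); it is an illustration, not the actual shopsys change.

```php
<?php
// Hypothetical sketch: build the same mock from the namespaced Twig class
// instead of the legacy Twig_Environment alias.
use PHPUnit\Framework\TestCase;
use Twig\Environment;

class GridTwigMockTest extends TestCase
{
    public function testNamespacedTwigMock(): void
    {
        // replaces $this->createMock(Twig_Environment::class)
        $twigMock = $this->createMock(Environment::class);
        $this->assertInstanceOf(Environment::class, $twigMock);
    }
}
```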
@@ -22,11 +22,11 @@ import ( "sync" "time" - "github.com/aws/amazon-ecs-agent/agent/api" "github.com/aws/amazon-ecs-agent/agent/resources/cgroup" "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" "github.com/cihub/seelog" "github.com/containerd/cgroups" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" )
1
// +build linux // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package taskresource import ( "encoding/json" "os" "path/filepath" "strconv" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/api" "github.com/aws/amazon-ecs-agent/agent/resources/cgroup" "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" "github.com/cihub/seelog" "github.com/containerd/cgroups" "github.com/pkg/errors" ) const ( memorySubsystem = "/memory" memoryUseHierarchy = "memory.use_hierarchy" rootReadOnlyPermissions = os.FileMode(400) ) var ( enableMemoryHierarchy = []byte(strconv.Itoa(1)) ) // CgroupResource represents Cgroup resource type CgroupResource struct { control cgroup.Control CgroupRoot string CgroupMountPath string ioutil ioutilwrapper.IOUtil createdAt time.Time desiredStatusUnsafe CgroupStatus knownStatusUnsafe CgroupStatus // lock is used for fields that are accessed and updated concurrently lock sync.RWMutex } // NewCgroupResource is used to return an object that implements the Resource interface func NewCgroupResource(control cgroup.Control, cgroupRoot string, cgroupMountPath string) *CgroupResource { return &CgroupResource{ control: control, CgroupRoot: cgroupRoot, CgroupMountPath: cgroupMountPath, ioutil: ioutilwrapper.NewIOUtil(), } } // SetDesiredStatus safely sets the desired status of the resource func (c *CgroupResource) SetDesiredStatus(status CgroupStatus) { c.lock.Lock() defer c.lock.Unlock() c.desiredStatusUnsafe = status } // GetDesiredStatus safely returns the desired status of the task func (c *CgroupResource) GetDesiredStatus() CgroupStatus { c.lock.RLock() defer c.lock.RUnlock() return c.desiredStatusUnsafe } // SetKnownStatus safely sets the currently known status of the resource func (c *CgroupResource) SetKnownStatus(status CgroupStatus) { c.lock.Lock() defer c.lock.Unlock() c.knownStatusUnsafe = status } // GetKnownStatus safely returns the currently known status of the task func (c *CgroupResource) GetKnownStatus() CgroupStatus { c.lock.RLock() defer c.lock.RUnlock() return c.knownStatusUnsafe } // SetCreatedAt sets the timestamp for resource's creation time func (c *CgroupResource) SetCreatedAt(createdAt time.Time) { if createdAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.createdAt = createdAt } // GetCreatedAt sets the timestamp for resource's creation time func (c *CgroupResource) GetCreatedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.createdAt } // Create creates cgroup root for the task func (c *CgroupResource) Create(task *api.Task) error { err := c.setupTaskCgroup(task) if err != nil { seelog.Criticalf("Cgroup resource [%s]: unable to setup cgroup root: %v", task.Arn, err) return err } return nil } func (c *CgroupResource) setupTaskCgroup(task *api.Task) error { cgroupRoot := c.CgroupRoot seelog.Debugf("Cgroup resource [%s]: setting up cgroup at: %s", task.Arn, cgroupRoot) if c.control.Exists(cgroupRoot) { seelog.Debugf("Cgroup resource [%s]: cgroup at %s already exists, skipping creation", task.Arn, 
cgroupRoot) return nil } linuxResourceSpec, err := task.BuildLinuxResourceSpec() if err != nil { return errors.Wrapf(err, "cgroup resource [%s]: setup cgroup: unable to build resource spec for task", task.Arn) } cgroupSpec := cgroup.Spec{ Root: cgroupRoot, Specs: &linuxResourceSpec, } _, err = c.control.Create(&cgroupSpec) if err != nil { return errors.Wrapf(err, "cgroup resource [%s]: setup cgroup: unable to create cgroup at %s", task.Arn, cgroupRoot) } // enabling cgroup memory hierarchy by doing 'echo 1 > memory.use_hierarchy' memoryHierarchyPath := filepath.Join(c.CgroupMountPath, memorySubsystem, cgroupRoot, memoryUseHierarchy) err = c.ioutil.WriteFile(memoryHierarchyPath, enableMemoryHierarchy, rootReadOnlyPermissions) if err != nil { return errors.Wrapf(err, "cgroup resource [%s]: setup cgroup: unable to set use hierarchy flag", task.Arn) } return nil } // Cleanup removes the cgroup root created for the task func (c *CgroupResource) Cleanup() error { err := c.control.Remove(c.CgroupRoot) // Explicitly handle cgroup deleted error if err != nil { if err == cgroups.ErrCgroupDeleted { seelog.Warnf("Cgroup at %s has already been removed: %v", c.CgroupRoot, err) return nil } return errors.Wrapf(err, "resource: cleanup cgroup: unable to remove cgroup at %s", c.CgroupRoot) } return nil } // cgroupResourceJSON duplicates CgroupResource fields, only for marshalling and unmarshalling purposes type cgroupResourceJSON struct { CgroupRoot string `json:"CgroupRoot"` CgroupMountPath string `json:"CgroupMountPath"` CreatedAt time.Time `json:",omitempty"` DesiredStatus *CgroupStatus `json:"DesiredStatus"` KnownStatus *CgroupStatus `json:"KnownStatus"` } // MarshalJSON marshals CgroupResource object using duplicate struct CgroupResourceJSON func (c *CgroupResource) MarshalJSON() ([]byte, error) { if c == nil { return nil, errors.New("cgroup resource is nil") } return json.Marshal(cgroupResourceJSON{ c.CgroupRoot, c.CgroupMountPath, c.GetCreatedAt(), func() *CgroupStatus { desiredState := c.GetDesiredStatus(); return &desiredState }(), func() *CgroupStatus { knownState := c.GetKnownStatus(); return &knownState }(), }) } // UnmarshalJSON unmarshals CgroupResource object using duplicate struct CgroupResourceJSON func (c *CgroupResource) UnmarshalJSON(b []byte) error { temp := cgroupResourceJSON{} if err := json.Unmarshal(b, &temp); err != nil { return err } c.CgroupRoot = temp.CgroupRoot c.CgroupMountPath = temp.CgroupMountPath if temp.DesiredStatus != nil { c.SetDesiredStatus(*temp.DesiredStatus) } if temp.KnownStatus != nil { c.SetKnownStatus(*temp.KnownStatus) } return nil }
1
19,343
Is this a new dependency? Does this require a dep update?
aws-amazon-ecs-agent
go
@@ -62,6 +62,8 @@ bool Print(T val, Type type, int /*indent*/, Type * /*union_type*/, if (type.base_type == BASE_TYPE_BOOL) { text += val != 0 ? "true" : "false"; + } else if (opts.generate_hexfloat_in_json && (type.base_type == BASE_TYPE_FLOAT || type.base_type == BASE_TYPE_DOUBLE)) { + text += FloatToStringtStringHex(val); } else { text += NumToString(val); }
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include "flatbuffers/flatbuffers.h" #include "flatbuffers/flexbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" namespace flatbuffers { static bool GenStruct(const StructDef &struct_def, const Table *table, int indent, const IDLOptions &opts, std::string *_text); // If indentation is less than 0, that indicates we don't want any newlines // either. const char *NewLine(const IDLOptions &opts) { return opts.indent_step >= 0 ? "\n" : ""; } int Indent(const IDLOptions &opts) { return std::max(opts.indent_step, 0); } // Output an identifier with or without quotes depending on strictness. void OutputIdentifier(const std::string &name, const IDLOptions &opts, std::string *_text) { std::string &text = *_text; if (opts.strict_json) text += "\""; text += name; if (opts.strict_json) text += "\""; } // Print (and its template specialization below for pointers) generate text // for a single FlatBuffer value into JSON format. // The general case for scalars: template<typename T> bool Print(T val, Type type, int /*indent*/, Type * /*union_type*/, const IDLOptions &opts, std::string *_text) { std::string &text = *_text; if (type.enum_def && opts.output_enum_identifiers) { auto enum_val = type.enum_def->ReverseLookup(static_cast<int64_t>(val)); if (enum_val) { text += "\""; text += enum_val->name; text += "\""; return true; } } if (type.base_type == BASE_TYPE_BOOL) { text += val != 0 ? "true" : "false"; } else { text += NumToString(val); } return true; } // Print a vector a sequence of JSON values, comma separated, wrapped in "[]". template<typename T> bool PrintVector(const Vector<T> &v, Type type, int indent, const IDLOptions &opts, std::string *_text) { std::string &text = *_text; text += "["; text += NewLine(opts); for (uoffset_t i = 0; i < v.size(); i++) { if (i) { if (!opts.protobuf_ascii_alike) text += ","; text += NewLine(opts); } text.append(indent + Indent(opts), ' '); if (IsStruct(type)) { if (!Print(v.GetStructFromOffset(i * type.struct_def->bytesize), type, indent + Indent(opts), nullptr, opts, _text)) { return false; } } else { if (!Print(v[i], type, indent + Indent(opts), nullptr, opts, _text)) { return false; } } } text += NewLine(opts); text.append(indent, ' '); text += "]"; return true; } // Specialization of Print above for pointer types. template<> bool Print<const void *>(const void *val, Type type, int indent, Type *union_type, const IDLOptions &opts, std::string *_text) { switch (type.base_type) { case BASE_TYPE_UNION: // If this assert hits, you have an corrupt buffer, a union type field // was not present or was out of range. 
assert(union_type); return Print<const void *>(val, *union_type, indent, nullptr, opts, _text); case BASE_TYPE_STRUCT: if (!GenStruct(*type.struct_def, reinterpret_cast<const Table *>(val), indent, opts, _text)) { return false; } break; case BASE_TYPE_STRING: { auto s = reinterpret_cast<const String *>(val); if (!EscapeString(s->c_str(), s->Length(), _text, opts.allow_non_utf8)) { return false; } break; } case BASE_TYPE_VECTOR: type = type.VectorType(); // Call PrintVector above specifically for each element type: switch (type.base_type) { // clang-format off #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ if (!PrintVector<CTYPE>( \ *reinterpret_cast<const Vector<CTYPE> *>(val), \ type, indent, opts, _text)) { \ return false; \ } \ break; FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // clang-format on } break; default: assert(0); } return true; } // Generate text for a scalar field. template<typename T> static bool GenField(const FieldDef &fd, const Table *table, bool fixed, const IDLOptions &opts, int indent, std::string *_text) { return Print(fixed ? reinterpret_cast<const Struct *>(table)->GetField<T>(fd.value.offset) : table->GetField<T>(fd.value.offset, IsFloat(fd.value.type.base_type) ? static_cast<T>(strtod(fd.value.constant.c_str(), nullptr)) : static_cast<T>(StringToInt(fd.value.constant.c_str()))), fd.value.type, indent, nullptr, opts, _text); } static bool GenStruct(const StructDef &struct_def, const Table *table, int indent, const IDLOptions &opts, std::string *_text); // Generate text for non-scalar field. static bool GenFieldOffset(const FieldDef &fd, const Table *table, bool fixed, int indent, Type *union_type, const IDLOptions &opts, std::string *_text) { const void *val = nullptr; if (fixed) { // The only non-scalar fields in structs are structs. assert(IsStruct(fd.value.type)); val = reinterpret_cast<const Struct *>(table)->GetStruct<const void *>( fd.value.offset); } else if (fd.flexbuffer) { auto vec = table->GetPointer<const Vector<uint8_t> *>(fd.value.offset); auto root = flexbuffers::GetRoot(vec->data(), vec->size()); root.ToString(true, opts.strict_json, *_text); return true; } else if (fd.nested_flatbuffer) { auto vec = table->GetPointer<const Vector<uint8_t> *>(fd.value.offset); auto root = GetRoot<Table>(vec->data()); return GenStruct(*fd.nested_flatbuffer, root, indent, opts, _text); } else { val = IsStruct(fd.value.type) ? 
table->GetStruct<const void *>(fd.value.offset) : table->GetPointer<const void *>(fd.value.offset); } return Print(val, fd.value.type, indent, union_type, opts, _text); } // Generate text for a struct or table, values separated by commas, indented, // and bracketed by "{}" static bool GenStruct(const StructDef &struct_def, const Table *table, int indent, const IDLOptions &opts, std::string *_text) { std::string &text = *_text; text += "{"; int fieldout = 0; Type *union_type = nullptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { FieldDef &fd = **it; auto is_present = struct_def.fixed || table->CheckField(fd.value.offset); auto output_anyway = opts.output_default_scalars_in_json && IsScalar(fd.value.type.base_type) && !fd.deprecated; if (is_present || output_anyway) { if (fieldout++) { if (!opts.protobuf_ascii_alike) text += ","; } text += NewLine(opts); text.append(indent + Indent(opts), ' '); OutputIdentifier(fd.name, opts, _text); if (!opts.protobuf_ascii_alike || (fd.value.type.base_type != BASE_TYPE_STRUCT && fd.value.type.base_type != BASE_TYPE_VECTOR)) text += ":"; text += " "; switch (fd.value.type.base_type) { // clang-format off #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ if (!GenField<CTYPE>(fd, table, struct_def.fixed, \ opts, indent + Indent(opts), _text)) { \ return false; \ } \ break; FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // Generate drop-thru case statements for all pointer types: #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD) #undef FLATBUFFERS_TD if (!GenFieldOffset(fd, table, struct_def.fixed, indent + Indent(opts), union_type, opts, _text)) { return false; } break; // clang-format on } if (fd.value.type.base_type == BASE_TYPE_UTYPE) { auto enum_val = fd.value.type.enum_def->ReverseLookup( table->GetField<uint8_t>(fd.value.offset, 0)); union_type = enum_val ? &enum_val->union_type : nullptr; } } } text += NewLine(opts); text.append(indent, ' '); text += "}"; return true; } // Generate a text representation of a flatbuffer in JSON format. bool GenerateText(const Parser &parser, const void *flatbuffer, std::string *_text) { std::string &text = *_text; assert(parser.root_struct_def_); // call SetRootType() text.reserve(1024); // Reduce amount of inevitable reallocs. 
if (!GenStruct(*parser.root_struct_def_, GetRoot<Table>(flatbuffer), 0, parser.opts, _text)) { return false; } text += NewLine(parser.opts); return true; } std::string TextFileName(const std::string &path, const std::string &file_name) { return path + file_name + ".json"; } bool GenerateTextFile(const Parser &parser, const std::string &path, const std::string &file_name) { if (!parser.builder_.GetSize() || !parser.root_struct_def_) return true; std::string text; if (!GenerateText(parser, parser.builder_.GetBufferPointer(), &text)) { return false; } return flatbuffers::SaveFile(TextFileName(path, file_name).c_str(), text, false); } std::string TextMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { if (!parser.builder_.GetSize() || !parser.root_struct_def_) return ""; std::string filebase = flatbuffers::StripPath(flatbuffers::StripExtension(file_name)); std::string make_rule = TextFileName(path, filebase) + ": " + file_name; auto included_files = parser.GetIncludedFilesRecursive(parser.root_struct_def_->file); for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } } // namespace flatbuffers
1
12,990
> opts.generate_hexfloat_in_json && **IsFloat**(type.base_type)
google-flatbuffers
java
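For the review comment above, a minimal sketch of the branch as the reviewer proposes it, using the `IsFloat()` helper that this same file already relies on elsewhere. `FloatToStringHex` is assumed to be the intended name of the hex-float helper (the patch spells it `FloatToStringtStringHex`); this is a sketch of the suggested shape, not the merged code.

```cpp
// Hypothetical rewrite of the scalar branch inside Print():
// IsFloat() already covers BASE_TYPE_FLOAT and BASE_TYPE_DOUBLE.
if (type.base_type == BASE_TYPE_BOOL) {
  text += val != 0 ? "true" : "false";
} else if (opts.generate_hexfloat_in_json && IsFloat(type.base_type)) {
  text += FloatToStringHex(val);  // e.g. 0x1.8p+0 instead of 1.5
} else {
  text += NumToString(val);
}
```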
@@ -7,4 +7,5 @@ package archer type Manifest interface { Marshal() ([]byte, error) DockerfilePath() string + AppName() string }
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package archer // Manifest is the interface for serializing a manifest object to a YAML document or CloudFormation template. type Manifest interface { Marshal() ([]byte, error) DockerfilePath() string }
1
11,283
As an alternative to this, we could also create a new method, like `Common() *AppManifest`. This way we don't have to add a new method to the interface every time we add a new field to the `AppManifest`. This is a nit, though, so it's up to you.
aws-copilot-cli
go
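A minimal sketch of the alternative the reviewer floats above: expose the shared manifest data once through a `Common()` accessor instead of adding one interface method per field. The `AppManifest` field set shown here is an assumption made purely for illustration; the real struct lives elsewhere in the project and may differ.

```go
package archer

// Hypothetical shape of the data shared by all manifests.
type AppManifest struct {
	AppName        string
	DockerfilePath string
}

// Manifest stays stable: new shared fields are reached through Common()
// rather than through a new interface method each time.
type Manifest interface {
	Marshal() ([]byte, error)
	Common() *AppManifest
}
```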
@@ -28,6 +28,7 @@ script_deps = { "urwid>=1.1", "lxml>=3.3.6", "Pillow>=2.3.0", + "harparser", }, "mitmdump": set() }
1
from setuptools import setup, find_packages from codecs import open import os from libmproxy import version # Based on https://github.com/pypa/sampleproject/blob/master/setup.py # and https://python-packaging-user-guide.readthedocs.org/ here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt'), encoding='utf-8') as f: long_description = f.read() scripts = ["mitmdump"] if os.name != "nt": scripts.append("mitmproxy") deps = { "netlib>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION), "pyasn1>0.1.2", "pyOpenSSL>=0.14", "Flask>=0.10.1", "tornado>=4.0.2", "sortedcontainers>=0.9.1" } script_deps = { "mitmproxy": { "urwid>=1.1", "lxml>=3.3.6", "Pillow>=2.3.0", }, "mitmdump": set() } for script in scripts: deps.update(script_deps[script]) if os.name == "nt": deps.add("pydivert>=0.0.4") # Transparent proxying on Windows setup( name="mitmproxy", version=version.VERSION, description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.", long_description=long_description, url="http://mitmproxy.org", author="Aldo Cortesi", author_email="aldo@corte.si", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Console :: Curses", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Topic :: Security", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: Proxy Servers", "Topic :: Software Development :: Testing" ], packages=find_packages(), include_package_data=True, scripts = scripts, install_requires=list(deps), extras_require={ 'dev': [ "mock>=1.0.1", "nose>=1.3.0", "nose-cov>=1.6", "coveralls>=0.4.1", "pathod>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION) ], 'contentviews': [ "pyamf>=0.6.1", "protobuf>=2.5.0", "cssutils>=1.0" ] } )
1
10,601
As long as we have this feature as an inline script, I'm inclined to remove the dependency here. I'd suggest that we just try to import harparser and pytz and throw an error instructing the user to install the dependencies (catch the `ImportError`). In the long term, we probably want to include that in the mitmproxy core, but we first want to introduce a clean extension interface before that happens. As soon as said interface is there, porting the inline script and adding the dependency again should be a breeze.
mitmproxy-mitmproxy
py
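A minimal sketch of the guard the reviewer suggests above for the inline HAR script, so setup.py does not need to grow a `harparser` dependency. The exact error wording is an assumption; the point is simply to catch the `ImportError` and tell the user what to install.

```python
# Hypothetical top-of-script guard for the HAR extractor inline script.
# harparser and pytz are used further down in the real script.
try:
    import harparser
    import pytz
except ImportError:
    raise ImportError(
        "The HAR extractor script needs extra packages. "
        "Install them with: pip install harparser pytz"
    )
```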
@@ -0,0 +1,4 @@ +// set the base folder of this project +global.basefolder = `${__dirname}` +require ("rechoir").prepare(require('interpret').extensions, './.gulp/gulpfile.iced'); +require ('./.gulp/gulpfile.iced')
1
1
25,414
Makes it work nicely with VS Code. All logic is now in the `.gulp/*.iced` files.
Azure-autorest
java
@@ -27,7 +27,7 @@ public interface SearchContext { * @return A list of all {@link WebElement}s, or an empty list if nothing matches * @see org.openqa.selenium.By */ - List<WebElement> findElements(By by); + <T extends WebElement> List<T> findElements(By by); /**
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium; import java.util.List; public interface SearchContext { /** * Find all elements within the current context using the given mechanism. * * @param by The locating mechanism to use * @return A list of all {@link WebElement}s, or an empty list if nothing matches * @see org.openqa.selenium.By */ List<WebElement> findElements(By by); /** * Find the first {@link WebElement} using the given method. * * @param by The locating mechanism * @return The first matching element on the current context * @throws NoSuchElementException If no matching elements are found */ WebElement findElement(By by); }
1
19,273
Should a similar change be done for findElement?
SeleniumHQ-selenium
js
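For the question in the review comment above, a minimal sketch of what the matching `findElement` change would look like. It is written as a stand-alone interface so it compiles against the Selenium API on its own; it is not the actual `org.openqa.selenium.SearchContext` edit, only an illustration of the signature being asked about.

```java
import java.util.List;
import org.openqa.selenium.By;
import org.openqa.selenium.WebElement;

// Hypothetical mirror of SearchContext with both lookups made generic.
interface SearchContextSketch {
    <T extends WebElement> List<T> findElements(By by);

    <T extends WebElement> T findElement(By by);
}
```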
@@ -2745,7 +2745,7 @@ TEST (node, local_votes_cache) { ASSERT_NO_ERROR (system.poll (node.aggregator.max_delay)); } - ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 3); + ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes)+node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_cached_late_votes) == 3); ASSERT_FALSE (node.history.votes (send1->root (), send1->hash ()).empty ()); ASSERT_FALSE (node.history.votes (send2->root (), send2->hash ()).empty ()); ASSERT_FALSE (node.history.votes (send3->root (), send3->hash ()).empty ());
1
#include <nano/lib/jsonconfig.hpp> #include <nano/node/election.hpp> #include <nano/node/testing.hpp> #include <nano/node/transport/udp.hpp> #include <nano/test_common/network.hpp> #include <nano/test_common/testutil.hpp> #include <gtest/gtest.h> #include <boost/filesystem.hpp> #include <boost/make_shared.hpp> #include <boost/variant.hpp> #include <numeric> using namespace std::chrono_literals; namespace { void add_required_children_node_config_tree (nano::jsonconfig & tree); } TEST (node, stop) { nano::system system (1); ASSERT_NE (system.nodes[0]->wallets.items.end (), system.nodes[0]->wallets.items.begin ()); system.nodes[0]->stop (); system.io_ctx.run (); ASSERT_TRUE (true); } TEST (node, work_generate) { nano::system system (1); auto & node (*system.nodes[0]); nano::block_hash root{ 1 }; nano::work_version version{ nano::work_version::work_1 }; { auto difficulty = nano::difficulty::from_multiplier (1.5, node.network_params.network.publish_thresholds.base); auto work = node.work_generate_blocking (version, root, difficulty); ASSERT_TRUE (work.is_initialized ()); ASSERT_TRUE (nano::work_difficulty (version, root, *work) >= difficulty); } { auto difficulty = nano::difficulty::from_multiplier (0.5, node.network_params.network.publish_thresholds.base); boost::optional<uint64_t> work; do { work = node.work_generate_blocking (version, root, difficulty); } while (nano::work_difficulty (version, root, *work) >= node.network_params.network.publish_thresholds.base); ASSERT_TRUE (work.is_initialized ()); ASSERT_TRUE (nano::work_difficulty (version, root, *work) >= difficulty); ASSERT_FALSE (nano::work_difficulty (version, root, *work) >= node.network_params.network.publish_thresholds.base); } } TEST (node, block_store_path_failure) { auto service (boost::make_shared<boost::asio::io_context> ()); auto path (nano::unique_path ()); nano::logging logging; logging.init (path); nano::work_pool work (std::numeric_limits<unsigned>::max ()); auto node (std::make_shared<nano::node> (*service, nano::get_available_port (), path, logging, work)); ASSERT_TRUE (node->wallets.items.empty ()); node->stop (); } #if defined(__clang__) && defined(__linux__) && CI // Disable test due to instability with clang and actions TEST (node_DeathTest, DISABLED_readonly_block_store_not_exist) #else TEST (node_DeathTest, readonly_block_store_not_exist) #endif { // This is a read-only node with no ledger file if (nano::using_rocksdb_in_tests ()) { nano::inactive_node node (nano::unique_path (), nano::inactive_node_flag_defaults ()); ASSERT_TRUE (node.node->init_error ()); } else { ASSERT_EXIT (nano::inactive_node node (nano::unique_path (), nano::inactive_node_flag_defaults ()), ::testing::ExitedWithCode (1), ""); } } TEST (node, password_fanout) { boost::asio::io_context io_ctx; auto path (nano::unique_path ()); nano::node_config config; config.peering_port = nano::get_available_port (); config.logging.init (path); nano::work_pool work (std::numeric_limits<unsigned>::max ()); config.password_fanout = 10; nano::node node (io_ctx, path, config, work); auto wallet (node.wallets.create (100)); ASSERT_EQ (10, wallet->store.password.values.size ()); node.stop (); } TEST (node, balance) { nano::system system (1); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto transaction (system.nodes[0]->store.tx_begin_write ()); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max (), system.nodes[0]->ledger.account_balance (transaction, nano::dev_genesis_key.pub)); } TEST (node, representative) { nano::system system (1); auto 
block1 (system.nodes[0]->rep_block (nano::dev_genesis_key.pub)); { auto transaction (system.nodes[0]->store.tx_begin_read ()); ASSERT_TRUE (system.nodes[0]->ledger.store.block_exists (transaction, block1)); } nano::keypair key; ASSERT_TRUE (system.nodes[0]->rep_block (key.pub).is_zero ()); } TEST (node, send_unkeyed) { nano::system system (1); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->store.password.value_set (nano::keypair ().prv); ASSERT_EQ (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); } TEST (node, send_self) { nano::system system (1); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->insert_adhoc (key2.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); ASSERT_TIMELY (10s, !system.nodes[0]->balance (key2.pub).is_zero ()); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max () - system.nodes[0]->config.receive_minimum.number (), system.nodes[0]->balance (nano::dev_genesis_key.pub)); } TEST (node, send_single) { nano::system system (2); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (1)->insert_adhoc (key2.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max () - system.nodes[0]->config.receive_minimum.number (), system.nodes[0]->balance (nano::dev_genesis_key.pub)); ASSERT_TRUE (system.nodes[0]->balance (key2.pub).is_zero ()); ASSERT_TIMELY (10s, !system.nodes[0]->balance (key2.pub).is_zero ()); } TEST (node, send_single_observing_peer) { nano::system system (3); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (1)->insert_adhoc (key2.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max () - system.nodes[0]->config.receive_minimum.number (), system.nodes[0]->balance (nano::dev_genesis_key.pub)); ASSERT_TRUE (system.nodes[0]->balance (key2.pub).is_zero ()); ASSERT_TIMELY (10s, std::all_of (system.nodes.begin (), system.nodes.end (), [&](std::shared_ptr<nano::node> const & node_a) { return !node_a->balance (key2.pub).is_zero (); })); } TEST (node, send_single_many_peers) { nano::system system (10); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (1)->insert_adhoc (key2.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max () - system.nodes[0]->config.receive_minimum.number (), system.nodes[0]->balance (nano::dev_genesis_key.pub)); ASSERT_TRUE (system.nodes[0]->balance (key2.pub).is_zero ()); ASSERT_TIMELY (3.5min, std::all_of (system.nodes.begin (), system.nodes.end (), [&](std::shared_ptr<nano::node> const & node_a) { return !node_a->balance (key2.pub).is_zero (); })); system.stop (); for (auto node : system.nodes) { ASSERT_TRUE (node->stopped); ASSERT_TRUE (node->network.tcp_channels.node_id_handhake_sockets_empty ()); } } TEST (node, send_out_of_order) { nano::system system (2); auto & node1 (*system.nodes[0]); nano::keypair 
key2; nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (send1->hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - 2 * node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); auto send3 = builder.make_block () .previous (send2->hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - 3 * node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send2->hash ())) .build_shared (); node1.process_active (send3); node1.process_active (send2); node1.process_active (send1); ASSERT_TIMELY (10s, std::all_of (system.nodes.begin (), system.nodes.end (), [&](std::shared_ptr<nano::node> const & node_a) { return node_a->balance (nano::dev_genesis_key.pub) == nano::genesis_amount - node1.config.receive_minimum.number () * 3; })); } TEST (node, quick_confirm) { nano::system system (1); auto & node1 (*system.nodes[0]); nano::keypair key; nano::block_hash previous (node1.latest (nano::dev_genesis_key.pub)); auto genesis_start_balance (node1.balance (nano::dev_genesis_key.pub)); system.wallet (0)->insert_adhoc (key.prv); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto send = nano::send_block_builder () .previous (previous) .destination (key.pub) .balance (node1.online_reps.delta () + 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (previous)) .build_shared (); node1.process_active (send); ASSERT_TIMELY (10s, !node1.balance (key.pub).is_zero ()); ASSERT_EQ (node1.balance (nano::dev_genesis_key.pub), node1.online_reps.delta () + 1); ASSERT_EQ (node1.balance (key.pub), genesis_start_balance - (node1.online_reps.delta () + 1)); } TEST (node, node_receive_quorum) { nano::system system (1); auto & node1 = *system.nodes[0]; nano::keypair key; nano::block_hash previous (node1.latest (nano::dev_genesis_key.pub)); system.wallet (0)->insert_adhoc (key.prv); auto send = nano::send_block_builder () .previous (previous) .destination (key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (previous)) .build_shared (); node1.process_active (send); ASSERT_TIMELY (10s, node1.ledger.block_exists (send->hash ())); ASSERT_TIMELY (10s, node1.active.election (nano::qualified_root (previous, previous)) != nullptr); auto election (node1.active.election (nano::qualified_root (previous, previous))); ASSERT_NE (nullptr, election); ASSERT_FALSE (election->confirmed ()); ASSERT_EQ (1, election->votes ().size ()); nano::system system2; system2.add_node (); system2.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_TRUE (node1.balance (key.pub).is_zero ()); node1.network.tcp_channels.start_tcp (system2.nodes[0]->network.endpoint (), nano::keepalive_tcp_callback (node1)); while (node1.balance (key.pub).is_zero ()) { ASSERT_NO_ERROR (system.poll ()); ASSERT_NO_ERROR (system2.poll ()); } } TEST (node, auto_bootstrap) { nano::system system; nano::node_config config 
(nano::get_available_port (), system.logging); config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; nano::node_flags node_flags; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_lazy_bootstrap = true; auto node0 = system.add_node (config, node_flags); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->insert_adhoc (key2.prv); auto send1 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node0->config.receive_minimum.number ())); ASSERT_NE (nullptr, send1); ASSERT_TIMELY (10s, node0->balance (key2.pub) == node0->config.receive_minimum.number ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); ASSERT_FALSE (node1->init_error ()); node1->start (); system.nodes.push_back (node1); ASSERT_NE (nullptr, nano::establish_tcp (system, *node1, node0->network.endpoint ())); ASSERT_TIMELY (10s, node1->bootstrap_initiator.in_progress ()); ASSERT_TIMELY (10s, node1->balance (key2.pub) == node0->config.receive_minimum.number ()); ASSERT_TIMELY (10s, !node1->bootstrap_initiator.in_progress ()); ASSERT_TRUE (node1->ledger.block_exists (send1->hash ())); // Wait block receive ASSERT_TIMELY (5s, node1->ledger.cache.block_count == 3); // Confirmation for all blocks ASSERT_TIMELY (5s, node1->ledger.cache.cemented_count == 3); node1->stop (); } TEST (node, auto_bootstrap_reverse) { nano::system system; nano::node_config config (nano::get_available_port (), system.logging); config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; nano::node_flags node_flags; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_lazy_bootstrap = true; auto node0 = system.add_node (config, node_flags); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->insert_adhoc (key2.prv); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); ASSERT_FALSE (node1->init_error ()); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node0->config.receive_minimum.number ())); node1->start (); system.nodes.push_back (node1); ASSERT_NE (nullptr, nano::establish_tcp (system, *node0, node1->network.endpoint ())); ASSERT_TIMELY (10s, node1->balance (key2.pub) == node0->config.receive_minimum.number ()); } TEST (node, auto_bootstrap_age) { nano::system system; nano::node_config config (nano::get_available_port (), system.logging); config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; nano::node_flags node_flags; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_lazy_bootstrap = true; node_flags.bootstrap_interval = 1; auto node0 = system.add_node (config, node_flags); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); ASSERT_FALSE (node1->init_error ()); node1->start (); system.nodes.push_back (node1); ASSERT_NE (nullptr, nano::establish_tcp (system, *node1, node0->network.endpoint ())); ASSERT_TIMELY (10s, node1->bootstrap_initiator.in_progress ()); // 4 bootstraps with frontiers age ASSERT_TIMELY (10s, node0->stats.count (nano::stat::type::bootstrap, nano::stat::detail::initiate_legacy_age, nano::stat::dir::out) >= 3); // More attempts with frontiers age ASSERT_GE 
(node0->stats.count (nano::stat::type::bootstrap, nano::stat::detail::initiate_legacy_age, nano::stat::dir::out), node0->stats.count (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out)); node1->stop (); } TEST (node, receive_gap) { nano::system system (1); auto & node1 (*system.nodes[0]); ASSERT_EQ (0, node1.gap_cache.size ()); auto block = nano::send_block_builder () .previous (5) .destination (1) .balance (2) .sign (nano::keypair ().prv, 4) .work (0) .build_shared (); node1.work_generate_blocking (*block); nano::publish message (block); node1.network.process_message (message, node1.network.udp_channels.create (node1.network.endpoint ())); node1.block_processor.flush (); ASSERT_EQ (1, node1.gap_cache.size ()); } TEST (node, merge_peers) { nano::system system (1); std::array<nano::endpoint, 8> endpoints; endpoints.fill (nano::endpoint (boost::asio::ip::address_v6::loopback (), nano::get_available_port ())); endpoints[0] = nano::endpoint (boost::asio::ip::address_v6::loopback (), nano::get_available_port ()); system.nodes[0]->network.merge_peers (endpoints); ASSERT_EQ (0, system.nodes[0]->network.size ()); } TEST (node, search_pending) { nano::system system (1); auto node (system.nodes[0]); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); system.wallet (0)->insert_adhoc (key2.prv); ASSERT_FALSE (system.wallet (0)->search_pending (system.wallet (0)->wallets.tx_begin_read ())); ASSERT_TIMELY (10s, !node->balance (key2.pub).is_zero ()); } TEST (node, search_pending_same) { nano::system system (1); auto node (system.nodes[0]); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); system.wallet (0)->insert_adhoc (key2.prv); ASSERT_FALSE (system.wallet (0)->search_pending (system.wallet (0)->wallets.tx_begin_read ())); ASSERT_TIMELY (10s, node->balance (key2.pub) == 2 * node->config.receive_minimum.number ()); } TEST (node, search_pending_multiple) { nano::system system (1); auto node (system.nodes[0]); nano::keypair key2; nano::keypair key3; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->insert_adhoc (key3.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key3.pub, node->config.receive_minimum.number ())); ASSERT_TIMELY (10s, !node->balance (key3.pub).is_zero ()); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, system.wallet (0)->send_action (key3.pub, key2.pub, node->config.receive_minimum.number ())); system.wallet (0)->insert_adhoc (key2.prv); ASSERT_FALSE (system.wallet (0)->search_pending (system.wallet (0)->wallets.tx_begin_read ())); ASSERT_TIMELY (10s, node->balance (key2.pub) == 2 * node->config.receive_minimum.number ()); } TEST (node, search_pending_confirmed) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node = system.add_node (node_config); nano::keypair key2; system.wallet (0)->insert_adhoc 
(nano::dev_genesis_key.prv); auto send1 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, send1); auto send2 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, send2); ASSERT_TIMELY (10s, node->active.empty ()); bool confirmed (false); system.deadline_set (5s); while (!confirmed) { auto transaction (node->store.tx_begin_read ()); confirmed = node->ledger.block_confirmed (transaction, send2->hash ()); ASSERT_NO_ERROR (system.poll ()); } { auto transaction (node->wallets.tx_begin_write ()); system.wallet (0)->store.erase (transaction, nano::dev_genesis_key.pub); } system.wallet (0)->insert_adhoc (key2.prv); ASSERT_FALSE (system.wallet (0)->search_pending (system.wallet (0)->wallets.tx_begin_read ())); { nano::lock_guard<nano::mutex> guard (node->active.mutex); auto existing1 (node->active.blocks.find (send1->hash ())); ASSERT_EQ (node->active.blocks.end (), existing1); auto existing2 (node->active.blocks.find (send2->hash ())); ASSERT_EQ (node->active.blocks.end (), existing2); } ASSERT_TIMELY (10s, node->balance (key2.pub) == 2 * node->config.receive_minimum.number ()); } TEST (node, search_pending_pruned) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node1 = system.add_node (node_config); nano::node_flags node_flags; node_flags.enable_pruning = true; nano::node_config config (nano::get_available_port (), system.logging); config.enable_voting = false; // Remove after allowing pruned voting auto node2 = system.add_node (config, node_flags); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto send1 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node2->config.receive_minimum.number ())); ASSERT_NE (nullptr, send1); auto send2 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node2->config.receive_minimum.number ())); ASSERT_NE (nullptr, send2); // Confirmation ASSERT_TIMELY (10s, node1->active.empty () && node2->active.empty ()); ASSERT_TIMELY (5s, node1->ledger.block_confirmed (node1->store.tx_begin_read (), send2->hash ())); ASSERT_TIMELY (5s, node2->ledger.cache.cemented_count == 3); system.wallet (0)->store.erase (node1->wallets.tx_begin_write (), nano::dev_genesis_key.pub); // Pruning { auto transaction (node2->store.tx_begin_write ()); ASSERT_EQ (1, node2->ledger.pruning_action (transaction, send1->hash (), 1)); } ASSERT_EQ (1, node2->ledger.cache.pruned_count); ASSERT_TRUE (node2->ledger.block_or_pruned_exists (send1->hash ())); ASSERT_FALSE (node2->ledger.block_exists (send1->hash ())); // Receive pruned block system.wallet (1)->insert_adhoc (key2.prv); ASSERT_FALSE (system.wallet (1)->search_pending (system.wallet (1)->wallets.tx_begin_read ())); { nano::lock_guard<nano::mutex> guard (node2->active.mutex); auto existing1 (node2->active.blocks.find (send1->hash ())); ASSERT_EQ (node2->active.blocks.end (), existing1); auto existing2 (node2->active.blocks.find (send2->hash ())); ASSERT_EQ (node2->active.blocks.end (), existing2); } ASSERT_TIMELY (10s, node2->balance (key2.pub) == 2 * node2->config.receive_minimum.number ()); } TEST (node, unlock_search) { nano::system system (1); auto node (system.nodes[0]); nano::keypair key2; nano::uint128_t balance (node->balance (nano::dev_genesis_key.pub)); { auto 
transaction (system.wallet (0)->wallets.tx_begin_write ()); system.wallet (0)->store.rekey (transaction, ""); } system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node->config.receive_minimum.number ())); ASSERT_TIMELY (10s, node->balance (nano::dev_genesis_key.pub) != balance); ASSERT_TIMELY (10s, node->active.empty ()); system.wallet (0)->insert_adhoc (key2.prv); { nano::lock_guard<std::recursive_mutex> lock (system.wallet (0)->store.mutex); system.wallet (0)->store.password.value_set (nano::keypair ().prv); } { auto transaction (system.wallet (0)->wallets.tx_begin_write ()); ASSERT_FALSE (system.wallet (0)->enter_password (transaction, "")); } ASSERT_TIMELY (10s, !node->balance (key2.pub).is_zero ()); } TEST (node, connect_after_junk) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto node0 = system.add_node (node_flags); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); std::vector<uint8_t> junk_buffer; junk_buffer.push_back (0); auto channel1 (std::make_shared<nano::transport::channel_udp> (node1->network.udp_channels, node0->network.endpoint (), node1->network_params.protocol.protocol_version)); channel1->send_buffer (nano::shared_const_buffer (std::move (junk_buffer)), [](boost::system::error_code const &, size_t) {}); ASSERT_TIMELY (10s, node0->stats.count (nano::stat::type::error) != 0); node1->start (); system.nodes.push_back (node1); auto channel2 (std::make_shared<nano::transport::channel_udp> (node1->network.udp_channels, node0->network.endpoint (), node1->network_params.protocol.protocol_version)); node1->network.send_keepalive (channel2); ASSERT_TIMELY (10s, !node1->network.empty ()); node1->stop (); } TEST (node, working) { auto path (nano::working_path ()); ASSERT_FALSE (path.empty ()); } TEST (node, price) { nano::system system (1); auto price1 (system.nodes[0]->price (nano::Gxrb_ratio, 1)); ASSERT_EQ (nano::node::price_max * 100.0, price1); auto price2 (system.nodes[0]->price (nano::Gxrb_ratio * int(nano::node::free_cutoff + 1), 1)); ASSERT_EQ (0, price2); auto price3 (system.nodes[0]->price (nano::Gxrb_ratio * int(nano::node::free_cutoff + 2) / 2, 1)); ASSERT_EQ (nano::node::price_max * 100.0 / 2, price3); auto price4 (system.nodes[0]->price (nano::Gxrb_ratio * int(nano::node::free_cutoff) * 2, 1)); ASSERT_EQ (0, price4); } TEST (node, confirm_locked) { nano::system system (1); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto transaction (system.wallet (0)->wallets.tx_begin_read ()); system.wallet (0)->enter_password (transaction, "1"); auto block = nano::send_block_builder () .previous (0) .destination (0) .balance (0) .sign (nano::keypair ().prv, 0) .work (0) .build_shared (); system.nodes[0]->network.flood_block (block); } TEST (node_config, serialization) { auto path (nano::unique_path ()); nano::logging logging1; logging1.init (path); nano::node_config config1 (100, logging1); config1.bootstrap_fraction_numerator = 10; config1.receive_minimum = 10; config1.online_weight_minimum = 10; config1.password_fanout = 20; config1.enable_voting = false; config1.callback_address = "dev"; config1.callback_port = 10; config1.callback_target = "dev"; config1.deprecated_lmdb_max_dbs = 256; nano::jsonconfig tree; config1.serialize_json (tree); nano::logging logging2; logging2.init (path); logging2.node_lifetime_tracing_value = 
!logging2.node_lifetime_tracing_value; nano::node_config config2 (50, logging2); ASSERT_NE (config2.bootstrap_fraction_numerator, config1.bootstrap_fraction_numerator); ASSERT_NE (config2.peering_port, config1.peering_port); ASSERT_NE (config2.logging.node_lifetime_tracing_value, config1.logging.node_lifetime_tracing_value); ASSERT_NE (config2.online_weight_minimum, config1.online_weight_minimum); ASSERT_NE (config2.password_fanout, config1.password_fanout); ASSERT_NE (config2.enable_voting, config1.enable_voting); ASSERT_NE (config2.callback_address, config1.callback_address); ASSERT_NE (config2.callback_port, config1.callback_port); ASSERT_NE (config2.callback_target, config1.callback_target); ASSERT_NE (config2.deprecated_lmdb_max_dbs, config1.deprecated_lmdb_max_dbs); ASSERT_FALSE (tree.get_optional<std::string> ("epoch_block_link")); ASSERT_FALSE (tree.get_optional<std::string> ("epoch_block_signer")); bool upgraded (false); ASSERT_FALSE (config2.deserialize_json (upgraded, tree)); ASSERT_FALSE (upgraded); ASSERT_EQ (config2.bootstrap_fraction_numerator, config1.bootstrap_fraction_numerator); ASSERT_EQ (config2.peering_port, config1.peering_port); ASSERT_EQ (config2.logging.node_lifetime_tracing_value, config1.logging.node_lifetime_tracing_value); ASSERT_EQ (config2.online_weight_minimum, config1.online_weight_minimum); ASSERT_EQ (config2.password_fanout, config1.password_fanout); ASSERT_EQ (config2.enable_voting, config1.enable_voting); ASSERT_EQ (config2.callback_address, config1.callback_address); ASSERT_EQ (config2.callback_port, config1.callback_port); ASSERT_EQ (config2.callback_target, config1.callback_target); ASSERT_EQ (config2.deprecated_lmdb_max_dbs, config1.deprecated_lmdb_max_dbs); } TEST (node_config, v17_values) { nano::jsonconfig tree; add_required_children_node_config_tree (tree); auto path (nano::unique_path ()); auto upgraded (false); nano::node_config config; config.logging.init (path); // Check config is correct { tree.put ("tcp_io_timeout", 1); tree.put ("pow_sleep_interval", 0); tree.put ("external_address", "::1"); tree.put ("external_port", 0); tree.put ("tcp_incoming_connections_max", 1); tree.put ("vote_generator_delay", 50); tree.put ("vote_generator_threshold", 3); nano::jsonconfig txn_tracking_l; txn_tracking_l.put ("enable", false); txn_tracking_l.put ("min_read_txn_time", 0); txn_tracking_l.put ("min_write_txn_time", 0); txn_tracking_l.put ("ignore_writes_below_block_processor_max_time", true); nano::jsonconfig diagnostics_l; diagnostics_l.put_child ("txn_tracking", txn_tracking_l); tree.put_child ("diagnostics", diagnostics_l); tree.put ("use_memory_pools", true); tree.put ("confirmation_history_size", 2048); tree.put ("active_elections_size", 50000); tree.put ("bandwidth_limit", 10485760); tree.put ("conf_height_processor_batch_min_time", 0); } config.deserialize_json (upgraded, tree); ASSERT_FALSE (upgraded); ASSERT_EQ (config.tcp_io_timeout.count (), 1); ASSERT_EQ (config.pow_sleep_interval.count (), 0); ASSERT_EQ (config.external_address, "::1"); ASSERT_EQ (config.external_port, 0); ASSERT_EQ (config.tcp_incoming_connections_max, 1); ASSERT_FALSE (config.diagnostics_config.txn_tracking.enable); ASSERT_EQ (config.diagnostics_config.txn_tracking.min_read_txn_time.count (), 0); ASSERT_EQ (config.diagnostics_config.txn_tracking.min_write_txn_time.count (), 0); ASSERT_TRUE (config.diagnostics_config.txn_tracking.ignore_writes_below_block_processor_max_time); ASSERT_TRUE (config.use_memory_pools); ASSERT_EQ (config.confirmation_history_size, 2048); 
ASSERT_EQ (config.active_elections_size, 50000); ASSERT_EQ (config.bandwidth_limit, 10485760); ASSERT_EQ (config.conf_height_processor_batch_min_time.count (), 0); // Check config is correct with other values tree.put ("tcp_io_timeout", std::numeric_limits<unsigned long>::max () - 100); tree.put ("pow_sleep_interval", std::numeric_limits<unsigned long>::max () - 100); tree.put ("external_address", "::ffff:192.168.1.1"); tree.put ("external_port", std::numeric_limits<uint16_t>::max () - 1); tree.put ("tcp_incoming_connections_max", std::numeric_limits<unsigned>::max ()); tree.put ("vote_generator_delay", std::numeric_limits<unsigned long>::max () - 100); tree.put ("vote_generator_threshold", 10); nano::jsonconfig txn_tracking_l; txn_tracking_l.put ("enable", true); txn_tracking_l.put ("min_read_txn_time", 1234); txn_tracking_l.put ("min_write_txn_time", std::numeric_limits<unsigned>::max ()); txn_tracking_l.put ("ignore_writes_below_block_processor_max_time", false); nano::jsonconfig diagnostics_l; diagnostics_l.replace_child ("txn_tracking", txn_tracking_l); tree.replace_child ("diagnostics", diagnostics_l); tree.put ("use_memory_pools", false); tree.put ("confirmation_history_size", std::numeric_limits<unsigned long long>::max ()); tree.put ("active_elections_size", std::numeric_limits<unsigned long long>::max ()); tree.put ("bandwidth_limit", std::numeric_limits<size_t>::max ()); tree.put ("conf_height_processor_batch_min_time", 500); upgraded = false; config.deserialize_json (upgraded, tree); ASSERT_FALSE (upgraded); ASSERT_EQ (config.tcp_io_timeout.count (), std::numeric_limits<unsigned long>::max () - 100); ASSERT_EQ (config.pow_sleep_interval.count (), std::numeric_limits<unsigned long>::max () - 100); ASSERT_EQ (config.external_address, "::ffff:192.168.1.1"); ASSERT_EQ (config.external_port, std::numeric_limits<uint16_t>::max () - 1); ASSERT_EQ (config.tcp_incoming_connections_max, std::numeric_limits<unsigned>::max ()); ASSERT_EQ (config.vote_generator_delay.count (), std::numeric_limits<unsigned long>::max () - 100); ASSERT_EQ (config.vote_generator_threshold, 10); ASSERT_TRUE (config.diagnostics_config.txn_tracking.enable); ASSERT_EQ (config.diagnostics_config.txn_tracking.min_read_txn_time.count (), 1234); ASSERT_EQ (config.tcp_incoming_connections_max, std::numeric_limits<unsigned>::max ()); ASSERT_EQ (config.diagnostics_config.txn_tracking.min_write_txn_time.count (), std::numeric_limits<unsigned>::max ()); ASSERT_FALSE (config.diagnostics_config.txn_tracking.ignore_writes_below_block_processor_max_time); ASSERT_FALSE (config.use_memory_pools); ASSERT_EQ (config.confirmation_history_size, std::numeric_limits<unsigned long long>::max ()); ASSERT_EQ (config.active_elections_size, std::numeric_limits<unsigned long long>::max ()); ASSERT_EQ (config.bandwidth_limit, std::numeric_limits<size_t>::max ()); ASSERT_EQ (config.conf_height_processor_batch_min_time.count (), 500); } TEST (node_config, v17_v18_upgrade) { auto path (nano::unique_path ()); nano::jsonconfig tree; add_required_children_node_config_tree (tree); tree.put ("version", "17"); auto upgraded (false); nano::node_config config; config.logging.init (path); // Initial values for configs that should be upgraded config.active_elections_size = 50000; config.vote_generator_delay = 500ms; // These config options should not be present ASSERT_FALSE (tree.get_optional_child ("backup_before_upgrade")); ASSERT_FALSE (tree.get_optional_child ("work_watcher_period")); config.deserialize_json (upgraded, tree); // These configs should 
have been upgraded ASSERT_EQ (100, tree.get<unsigned> ("vote_generator_delay")); ASSERT_EQ (10000, tree.get<unsigned long long> ("active_elections_size")); // The config options should be added after the upgrade ASSERT_TRUE (!!tree.get_optional_child ("backup_before_upgrade")); ASSERT_TRUE (!!tree.get_optional_child ("work_watcher_period")); ASSERT_TRUE (upgraded); auto version (tree.get<std::string> ("version")); // Check version is updated ASSERT_GT (std::stoull (version), 17); } TEST (node_config, v18_values) { nano::jsonconfig tree; add_required_children_node_config_tree (tree); auto path (nano::unique_path ()); auto upgraded (false); nano::node_config config; config.logging.init (path); // Check config is correct { tree.put ("active_elections_size", 10000); tree.put ("vote_generator_delay", 100); tree.put ("backup_before_upgrade", true); tree.put ("work_watcher_period", 5); } config.deserialize_json (upgraded, tree); ASSERT_FALSE (upgraded); ASSERT_EQ (config.active_elections_size, 10000); ASSERT_EQ (config.vote_generator_delay.count (), 100); ASSERT_EQ (config.backup_before_upgrade, true); ASSERT_EQ (config.work_watcher_period.count (), 5); // Check config is correct with other values tree.put ("active_elections_size", 5); tree.put ("vote_generator_delay", std::numeric_limits<unsigned long>::max () - 100); tree.put ("backup_before_upgrade", false); tree.put ("work_watcher_period", 999); upgraded = false; config.deserialize_json (upgraded, tree); ASSERT_FALSE (upgraded); ASSERT_EQ (config.active_elections_size, 5); ASSERT_EQ (config.vote_generator_delay.count (), std::numeric_limits<unsigned long>::max () - 100); ASSERT_EQ (config.backup_before_upgrade, false); ASSERT_EQ (config.work_watcher_period.count (), 999); } // Regression test to ensure that deserializing includes changes node via get_required_child TEST (node_config, required_child) { auto path (nano::unique_path ()); nano::logging logging1; nano::logging logging2; logging1.init (path); nano::jsonconfig tree; nano::jsonconfig logging_l; logging1.serialize_json (logging_l); tree.put_child ("logging", logging_l); auto child_l (tree.get_required_child ("logging")); child_l.put<bool> ("flush", !logging1.flush); bool upgraded (false); logging2.deserialize_json (upgraded, child_l); ASSERT_NE (logging1.flush, logging2.flush); } TEST (node_config, random_rep) { auto path (nano::unique_path ()); nano::logging logging1; logging1.init (path); nano::node_config config1 (100, logging1); auto rep (config1.random_representative ()); ASSERT_NE (config1.preconfigured_representatives.end (), std::find (config1.preconfigured_representatives.begin (), config1.preconfigured_representatives.end (), rep)); } TEST (node_config, unsupported_version_upgrade) { auto path (nano::unique_path ()); nano::logging logging1; logging1.init (path); nano::node_config node_config (100, logging1); nano::jsonconfig config; node_config.serialize_json (config); config.put ("version", "16"); // Version 16 and earlier is no longer supported for direct upgrade nano::node_config node_config1; bool upgraded{ false }; auto err = node_config1.deserialize_json (upgraded, config); ASSERT_FALSE (upgraded); ASSERT_TRUE (err); } class json_initial_value_test final { public: explicit json_initial_value_test (std::string const & text_a) : text (text_a) { } nano::error serialize_json (nano::jsonconfig & json) { json.put ("thing", text); return json.get_error (); } std::string text; }; class json_upgrade_test final { public: nano::error deserialize_json (bool & upgraded, 
nano::jsonconfig & json) { if (!json.empty ()) { auto text_l (json.get<std::string> ("thing")); if (text_l == "junktest" || text_l == "created") { upgraded = true; text_l = "changed"; json.put ("thing", text_l); } if (text_l == "error") { json.get_error () = nano::error_common::generic; } text = text_l; } else { upgraded = true; text = "created"; json.put ("thing", text); } return json.get_error (); } std::string text; }; /** Both create and upgrade via read_and_update() */ TEST (json, create_and_upgrade) { auto path (nano::unique_path ()); nano::jsonconfig json; json_upgrade_test object1; ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ ("created", object1.text); nano::jsonconfig json2; json_upgrade_test object2; ASSERT_FALSE (json2.read_and_update (object2, path)); ASSERT_EQ ("changed", object2.text); } /** Create config manually, then upgrade via read_and_update() with multiple calls to test idempotence */ TEST (json, upgrade_from_existing) { auto path (nano::unique_path ()); nano::jsonconfig json; json_initial_value_test junktest ("junktest"); junktest.serialize_json (json); json.write (path); json_upgrade_test object1; ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ ("changed", object1.text); ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ ("changed", object1.text); } /** Test that backups are made only when there is an upgrade */ TEST (json, backup) { auto dir (nano::unique_path ()); namespace fs = boost::filesystem; fs::create_directory (dir); auto path = dir / dir.leaf (); // Create json file nano::jsonconfig json; json_upgrade_test object1; ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ ("created", object1.text); /** Returns 'dir' if backup file cannot be found */ auto get_backup_path = [&dir]() { for (fs::directory_iterator itr (dir); itr != fs::directory_iterator (); ++itr) { if (itr->path ().filename ().string ().find ("_backup_") != std::string::npos) { return itr->path (); } } return dir; }; auto get_file_count = [&dir]() { return std::count_if (boost::filesystem::directory_iterator (dir), boost::filesystem::directory_iterator (), static_cast<bool (*) (const boost::filesystem::path &)> (boost::filesystem::is_regular_file)); }; // There should only be the original file in this directory ASSERT_EQ (get_file_count (), 1); ASSERT_EQ (get_backup_path (), dir); // Upgrade, check that there is a backup which matches the first object ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ (get_file_count (), 2); ASSERT_NE (get_backup_path (), path); // Check there is a backup which has the same contents as the original file nano::jsonconfig json1; ASSERT_FALSE (json1.read (get_backup_path ())); ASSERT_EQ (json1.get<std::string> ("thing"), "created"); // Try and upgrade an already upgraded file, should not create any backups ASSERT_FALSE (json.read_and_update (object1, path)); ASSERT_EQ (get_file_count (), 2); } TEST (node_flags, disable_tcp_realtime) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto node1 = system.add_node (node_flags); node_flags.disable_tcp_realtime = true; auto node2 = system.add_node (node_flags); ASSERT_EQ (1, node1->network.size ()); auto list1 (node1->network.list (2)); ASSERT_EQ (node2->network.endpoint (), list1[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::udp, list1[0]->get_type ()); ASSERT_EQ (1, node2->network.size ()); auto list2 (node2->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list2[0]->get_endpoint ()); 
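// node2's view of node1 should likewise be a UDP channel, since node2 has TCP realtime disabled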
ASSERT_EQ (nano::transport::transport_type::udp, list2[0]->get_type ()); } TEST (node_flags, disable_tcp_realtime_and_bootstrap_listener) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto node1 = system.add_node (node_flags); node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; auto node2 = system.add_node (node_flags); ASSERT_EQ (nano::tcp_endpoint (boost::asio::ip::address_v6::loopback (), 0), node2->bootstrap.endpoint ()); ASSERT_NE (nano::endpoint (boost::asio::ip::address_v6::loopback (), 0), node2->network.endpoint ()); ASSERT_EQ (1, node1->network.size ()); auto list1 (node1->network.list (2)); ASSERT_EQ (node2->network.endpoint (), list1[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::udp, list1[0]->get_type ()); ASSERT_EQ (1, node2->network.size ()); auto list2 (node2->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list2[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::udp, list2[0]->get_type ()); } // UDP is disabled by default TEST (node_flags, disable_udp) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto node1 = system.add_node (node_flags); auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::unique_path (), nano::node_config (nano::get_available_port (), system.logging), system.work)); system.nodes.push_back (node2); node2->start (); ASSERT_EQ (nano::endpoint (boost::asio::ip::address_v6::loopback (), 0), node2->network.udp_channels.get_local_endpoint ()); ASSERT_NE (nano::endpoint (boost::asio::ip::address_v6::loopback (), 0), node2->network.endpoint ()); // Send UDP message auto channel (std::make_shared<nano::transport::channel_udp> (node1->network.udp_channels, node2->network.endpoint (), node2->network_params.protocol.protocol_version)); node1->network.send_keepalive (channel); std::this_thread::sleep_for (std::chrono::milliseconds (500)); // Check empty network ASSERT_EQ (0, node1->network.size ()); ASSERT_EQ (0, node2->network.size ()); // Send TCP handshake node1->network.merge_peer (node2->network.endpoint ()); ASSERT_TIMELY (5s, node1->bootstrap.realtime_count == 1 && node2->bootstrap.realtime_count == 1); ASSERT_EQ (1, node1->network.size ()); auto list1 (node1->network.list (2)); ASSERT_EQ (node2->network.endpoint (), list1[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::tcp, list1[0]->get_type ()); ASSERT_EQ (1, node2->network.size ()); auto list2 (node2->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list2[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::tcp, list2[0]->get_type ()); node2->stop (); } TEST (node, fork_publish) { std::weak_ptr<nano::node> node0; { nano::system system (1); node0 = system.nodes[0]; auto & node1 (*system.nodes[0]); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node1.work_generate_blocking (*send1); nano::keypair key2; auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node1.work_generate_blocking (*send2); node1.process_active (send1); node1.block_processor.flush 
(); ASSERT_EQ (1, node1.active.size ()); auto election (node1.active.election (send1->qualified_root ())); ASSERT_NE (nullptr, election); // Wait until the genesis rep activated & makes vote ASSERT_TIMELY (1s, election->votes ().size () == 2); node1.process_active (send2); node1.block_processor.flush (); auto votes1 (election->votes ()); auto existing1 (votes1.find (nano::dev_genesis_key.pub)); ASSERT_NE (votes1.end (), existing1); ASSERT_EQ (send1->hash (), existing1->second.hash); auto winner (*election->tally ().begin ()); ASSERT_EQ (*send1, *winner.second); ASSERT_EQ (nano::genesis_amount - 100, winner.first); } ASSERT_TRUE (node0.expired ()); } // Tests that an election gets started correctly from a fork TEST (node, fork_publish_inactive) { nano::system system (1); nano::genesis genesis; nano::keypair key1; nano::keypair key2; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (send1->block_work ()) .build_shared (); auto & node = *system.nodes[0]; node.process_active (send1); ASSERT_TIMELY (3s, nullptr != node.block (send1->hash ())); ASSERT_EQ (nano::process_result::fork, node.process_local (send2).code); auto election = node.active.election (send1->qualified_root ()); ASSERT_NE (election, nullptr); auto blocks = election->blocks (); ASSERT_NE (blocks.end (), blocks.find (send1->hash ())); ASSERT_NE (blocks.end (), blocks.find (send2->hash ())); ASSERT_EQ (election->winner ()->hash (), send1->hash ()); ASSERT_NE (election->winner ()->hash (), send2->hash ()); } TEST (node, fork_keep) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); ASSERT_EQ (1, node1.network.size ()); nano::keypair key1; nano::keypair key2; nano::genesis genesis; nano::send_block_builder builder; // send1 and send2 fork to different accounts auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); node1.process_active (send1); node1.block_processor.flush (); node2.process_active (send1); node2.block_processor.flush (); ASSERT_EQ (1, node1.active.size ()); ASSERT_EQ (1, node2.active.size ()); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); node1.process_active (send2); node1.block_processor.flush (); node2.process_active (send2); node2.block_processor.flush (); auto election1 (node2.active.election (nano::qualified_root (genesis.hash (), genesis.hash ()))); ASSERT_NE (nullptr, election1); ASSERT_EQ (1, election1->votes ().size ()); ASSERT_TRUE (node1.ledger.block_exists (send1->hash ())); ASSERT_TRUE (node2.ledger.block_exists (send1->hash ())); // Wait until the genesis rep makes a vote ASSERT_TIMELY (1.5min, election1->votes ().size () != 1); auto transaction0 (node1.store.tx_begin_read ()); auto 
transaction1 (node2.store.tx_begin_read ()); // The vote should be in agreement with what we already have. auto winner (*election1->tally ().begin ()); ASSERT_EQ (*send1, *winner.second); ASSERT_EQ (nano::genesis_amount - 100, winner.first); ASSERT_TRUE (node1.store.block_exists (transaction0, send1->hash ())); ASSERT_TRUE (node2.store.block_exists (transaction1, send1->hash ())); } TEST (node, fork_flip) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); ASSERT_EQ (1, node1.network.size ()); nano::keypair key1; nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::publish publish1 (send1); nano::keypair key2; auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::publish publish2 (send2); auto channel1 (node1.network.udp_channels.create (node1.network.endpoint ())); node1.network.process_message (publish1, channel1); node1.block_processor.flush (); auto channel2 (node2.network.udp_channels.create (node1.network.endpoint ())); node2.network.process_message (publish2, channel2); node2.block_processor.flush (); ASSERT_EQ (1, node1.active.size ()); ASSERT_EQ (1, node2.active.size ()); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); node1.network.process_message (publish2, channel1); node1.block_processor.flush (); node2.network.process_message (publish1, channel2); node2.block_processor.flush (); auto election1 (node2.active.election (nano::qualified_root (genesis.hash (), genesis.hash ()))); ASSERT_NE (nullptr, election1); ASSERT_EQ (1, election1->votes ().size ()); ASSERT_NE (nullptr, node1.block (publish1.block->hash ())); ASSERT_NE (nullptr, node2.block (publish2.block->hash ())); ASSERT_TIMELY (10s, node2.ledger.block_exists (publish1.block->hash ())); auto winner (*election1->tally ().begin ()); ASSERT_EQ (*publish1.block, *winner.second); ASSERT_EQ (nano::genesis_amount - 100, winner.first); ASSERT_TRUE (node1.ledger.block_exists (publish1.block->hash ())); ASSERT_TRUE (node2.ledger.block_exists (publish1.block->hash ())); ASSERT_FALSE (node2.ledger.block_exists (publish2.block->hash ())); } TEST (node, fork_multi_flip) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, nano::transport::transport_type::udp }; for (auto & type : types) { nano::system system; nano::node_flags node_flags; if (type == nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node1 (*system.add_node (node_config, node_flags, type)); node_config.peering_port = nano::get_available_port (); auto & node2 (*system.add_node (node_config, node_flags, type)); ASSERT_EQ (1, node1.network.size ()); nano::keypair key1; nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 100) .sign 
(nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::publish publish1 (send1); nano::keypair key2; auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::publish publish2 (send2); auto send3 = builder.make_block () .previous (publish2.block->hash ()) .destination (key2.pub) .balance (nano::genesis_amount - 100) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (publish2.block->hash ())) .build_shared (); nano::publish publish3 (send3); node1.network.process_message (publish1, node1.network.udp_channels.create (node1.network.endpoint ())); node2.network.process_message (publish2, node2.network.udp_channels.create (node2.network.endpoint ())); node2.network.process_message (publish3, node2.network.udp_channels.create (node2.network.endpoint ())); node1.block_processor.flush (); node2.block_processor.flush (); ASSERT_EQ (1, node1.active.size ()); ASSERT_EQ (1, node2.active.size ()); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); node1.network.process_message (publish2, node1.network.udp_channels.create (node1.network.endpoint ())); node1.network.process_message (publish3, node1.network.udp_channels.create (node1.network.endpoint ())); node1.block_processor.flush (); node2.network.process_message (publish1, node2.network.udp_channels.create (node2.network.endpoint ())); node2.block_processor.flush (); auto election1 (node2.active.election (nano::qualified_root (genesis.hash (), genesis.hash ()))); ASSERT_NE (nullptr, election1); ASSERT_EQ (1, election1->votes ().size ()); ASSERT_TRUE (node1.ledger.block_exists (publish1.block->hash ())); ASSERT_TRUE (node2.ledger.block_exists (publish2.block->hash ())); ASSERT_TRUE (node2.ledger.block_exists (publish3.block->hash ())); ASSERT_TIMELY (10s, node2.ledger.block_exists (publish1.block->hash ())); auto winner (*election1->tally ().begin ()); ASSERT_EQ (*publish1.block, *winner.second); ASSERT_EQ (nano::genesis_amount - 100, winner.first); ASSERT_TRUE (node1.ledger.block_exists (publish1.block->hash ())); ASSERT_TRUE (node2.ledger.block_exists (publish1.block->hash ())); ASSERT_FALSE (node2.ledger.block_exists (publish2.block->hash ())); ASSERT_FALSE (node2.ledger.block_exists (publish3.block->hash ())); } } // Blocks that are no longer actively being voted on should be able to be evicted through bootstrapping. 
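// In this test each node starts with a different unconfirmed send from the genesis account, and node2 is expected to converge on node1's block via bootstrap.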
// This could happen if a fork wasn't resolved before the process previously shut down TEST (node, fork_bootstrap_flip) { nano::system system0; nano::system system1; nano::node_config config0{ nano::get_available_port (), system0.logging }; config0.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; nano::node_flags node_flags; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_lazy_bootstrap = true; auto & node1 = *system0.add_node (config0, node_flags); nano::node_config config1 (nano::get_available_port (), system1.logging); auto & node2 = *system1.add_node (config1, node_flags); system0.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::block_hash latest = node1.latest (nano::dev_genesis_key.pub); nano::keypair key1; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (latest) .destination (key1.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system0.work.generate (latest)) .build_shared (); nano::keypair key2; auto send2 = builder.make_block () .previous (latest) .destination (key2.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system0.work.generate (latest)) .build_shared (); // Insert but don't rebroadcast, simulating settled blocks ASSERT_EQ (nano::process_result::progress, node1.ledger.process (node1.store.tx_begin_write (), *send1).code); ASSERT_EQ (nano::process_result::progress, node2.ledger.process (node2.store.tx_begin_write (), *send2).code); ASSERT_TRUE (node2.store.block_exists (node2.store.tx_begin_read (), send2->hash ())); node2.bootstrap_initiator.bootstrap (node1.network.endpoint ()); // Additionally add new peer to confirm & replace bootstrap block auto again (true); system1.deadline_set (50s); while (again) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); again = !node2.store.block_exists (node2.store.tx_begin_read (), send1->hash ()); } } TEST (node, fork_open) { nano::system system (1); auto & node1 (*system.nodes[0]); nano::keypair key1; nano::genesis genesis; auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key1.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::publish publish1 (send1); auto channel1 (node1.network.udp_channels.create (node1.network.endpoint ())); node1.network.process_message (publish1, channel1); node1.block_processor.flush (); auto election = node1.active.election (publish1.block->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (3s, node1.active.empty () && node1.block_confirmed (publish1.block->hash ())); nano::open_block_builder builder; auto open1 = builder.make_block () .source (publish1.block->hash ()) .representative (1) .account (key1.pub) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build_shared (); nano::publish publish2 (open1); node1.network.process_message (publish2, channel1); node1.block_processor.flush (); ASSERT_EQ (1, node1.active.size ()); auto open2 = builder.make_block () .source (publish1.block->hash ()) .representative (2) .account (key1.pub) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build_shared (); nano::publish publish3 (open2); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); node1.network.process_message (publish3, channel1); 
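// After this flush the election should contain both forked open blocks, with the first-arrived open1 (publish2) still the unconfirmed winner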
node1.block_processor.flush (); election = node1.active.election (publish3.block->qualified_root ()); ASSERT_EQ (2, election->blocks ().size ()); ASSERT_EQ (publish2.block->hash (), election->winner ()->hash ()); ASSERT_FALSE (election->confirmed ()); ASSERT_TRUE (node1.block (publish2.block->hash ())); ASSERT_FALSE (node1.block (publish3.block->hash ())); } TEST (node, fork_open_flip) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); ASSERT_EQ (1, node1.network.size ()); nano::keypair key1; nano::genesis genesis; nano::keypair rep1; nano::keypair rep2; auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); // A copy is necessary to avoid data races during ledger processing, which sets the sideband auto send1_copy (std::make_shared<nano::send_block> (*send1)); node1.process_active (send1); node2.process_active (send1_copy); // We should be keeping this block nano::open_block_builder builder; auto open1 = builder.make_block () .source (send1->hash ()) .representative (rep1.pub) .account (key1.pub) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build_shared (); // This block should be evicted auto open2 = builder.make_block () .source (send1->hash ()) .representative (rep2.pub) .account (key1.pub) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build_shared (); ASSERT_FALSE (*open1 == *open2); // node1 gets copy that will remain node1.process_active (open1); node1.block_processor.flush (); node1.block_confirm (open1); // node2 gets copy that will be evicted node2.process_active (open2); node2.block_processor.flush (); node2.block_confirm (open2); ASSERT_EQ (2, node1.active.size ()); ASSERT_EQ (2, node2.active.size ()); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); // Notify both nodes that a fork exists node1.process_active (open2); node1.block_processor.flush (); node2.process_active (open1); node2.block_processor.flush (); auto election1 (node2.active.election (open1->qualified_root ())); ASSERT_NE (nullptr, election1); ASSERT_EQ (1, election1->votes ().size ()); ASSERT_TRUE (node1.block (open1->hash ()) != nullptr); ASSERT_TRUE (node2.block (open2->hash ()) != nullptr); // Node2 should eventually settle on open1 ASSERT_TIMELY (10s, node2.block (open1->hash ())); node2.block_processor.flush (); auto transaction1 (node1.store.tx_begin_read ()); auto transaction2 (node2.store.tx_begin_read ()); auto winner (*election1->tally ().begin ()); ASSERT_EQ (*open1, *winner.second); ASSERT_EQ (nano::genesis_amount - 1, winner.first); ASSERT_TRUE (node1.store.block_exists (transaction1, open1->hash ())); ASSERT_TRUE (node2.store.block_exists (transaction2, open1->hash ())); ASSERT_FALSE (node2.store.block_exists (transaction2, open2->hash ())); } TEST (node, coherent_observer) { nano::system system (1); auto & node1 (*system.nodes[0]); node1.observers.blocks.add ([&node1](nano::election_status const & status_a, std::vector<nano::vote_with_weight_info> const &, nano::account const &, nano::uint128_t const &, bool) { auto transaction (node1.store.tx_begin_read ()); ASSERT_TRUE (node1.store.block_exists (transaction, status_a.winner->hash ())); }); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key; system.wallet (0)->send_action (nano::dev_genesis_key.pub, key.pub, 1); } TEST 
(node, fork_no_vote_quorum) { nano::system system (3); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); auto & node3 (*system.nodes[2]); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto key4 (system.wallet (0)->deterministic_insert ()); system.wallet (0)->send_action (nano::dev_genesis_key.pub, key4, nano::genesis_amount / 4); auto key1 (system.wallet (1)->deterministic_insert ()); { auto transaction (system.wallet (1)->wallets.tx_begin_write ()); system.wallet (1)->store.representative_set (transaction, key1); } auto block (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key1, node1.config.receive_minimum.number ())); ASSERT_NE (nullptr, block); ASSERT_TIMELY (30s, node3.balance (key1) == node1.config.receive_minimum.number () && node2.balance (key1) == node1.config.receive_minimum.number () && node1.balance (key1) == node1.config.receive_minimum.number ()); ASSERT_EQ (node1.config.receive_minimum.number (), node1.weight (key1)); ASSERT_EQ (node1.config.receive_minimum.number (), node2.weight (key1)); ASSERT_EQ (node1.config.receive_minimum.number (), node3.weight (key1)); nano::state_block send1 (nano::dev_genesis_key.pub, block->hash (), nano::dev_genesis_key.pub, (nano::genesis_amount / 4) - (node1.config.receive_minimum.number () * 2), key1, nano::dev_genesis_key.prv, nano::dev_genesis_key.pub, *system.work.generate (block->hash ())); ASSERT_EQ (nano::process_result::progress, node1.process (send1).code); ASSERT_EQ (nano::process_result::progress, node2.process (send1).code); ASSERT_EQ (nano::process_result::progress, node3.process (send1).code); auto key2 (system.wallet (2)->deterministic_insert ()); auto send2 = nano::send_block_builder () .previous (block->hash ()) .destination (key2) .balance ((nano::genesis_amount / 4) - (node1.config.receive_minimum.number () * 2)) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (block->hash ())) .build_shared (); nano::raw_key key3; auto transaction (system.wallet (1)->wallets.tx_begin_read ()); ASSERT_FALSE (system.wallet (1)->store.fetch (transaction, key1, key3)); auto vote (std::make_shared<nano::vote> (key1, key3, 0, send2)); nano::confirm_ack confirm (vote); std::vector<uint8_t> buffer; { nano::vectorstream stream (buffer); confirm.serialize (stream); } auto channel = node2.network.find_node_id (node3.node_id.pub); ASSERT_NE (nullptr, channel); channel->send_buffer (nano::shared_const_buffer (std::move (buffer))); ASSERT_TIMELY (10s, node3.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in) >= 3); ASSERT_TRUE (node1.latest (nano::dev_genesis_key.pub) == send1.hash ()); ASSERT_TRUE (node2.latest (nano::dev_genesis_key.pub) == send1.hash ()); ASSERT_TRUE (node3.latest (nano::dev_genesis_key.pub) == send1.hash ()); } // Disabled because it sometimes takes way too long (but still eventually finishes) TEST (node, DISABLED_fork_pre_confirm) { nano::system system (3); auto & node0 (*system.nodes[0]); auto & node1 (*system.nodes[1]); auto & node2 (*system.nodes[2]); nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; system.wallet (1)->insert_adhoc (key1.prv); { auto transaction (system.wallet (1)->wallets.tx_begin_write ()); system.wallet (1)->store.representative_set (transaction, key1.pub); } nano::keypair key2; system.wallet (2)->insert_adhoc (key2.prv); { auto transaction (system.wallet (2)->wallets.tx_begin_write ()); system.wallet (2)->store.representative_set 
(transaction, key2.pub); } auto block0 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key1.pub, nano::genesis_amount / 3)); ASSERT_NE (nullptr, block0); ASSERT_TIMELY (30s, node0.balance (key1.pub) != 0); auto block1 (system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, nano::genesis_amount / 3)); ASSERT_NE (nullptr, block1); ASSERT_TIMELY (30s, node0.balance (key2.pub) != 0); nano::keypair key3; nano::keypair key4; nano::state_block_builder builder; auto block2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (node0.latest (nano::dev_genesis_key.pub)) .representative (key3.pub) .balance (node0.balance (nano::dev_genesis_key.pub)) .link (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); auto block3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (node0.latest (nano::dev_genesis_key.pub)) .representative (key4.pub) .balance (node0.balance (nano::dev_genesis_key.pub)) .link (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node0.work_generate_blocking (*block2); node0.work_generate_blocking (*block3); node0.process_active (block2); node1.process_active (block2); node2.process_active (block3); auto done (false); // Extend deadline; we must finish within a total of 100 seconds system.deadline_set (70s); while (!done) { done |= node0.latest (nano::dev_genesis_key.pub) == block2->hash () && node1.latest (nano::dev_genesis_key.pub) == block2->hash () && node2.latest (nano::dev_genesis_key.pub) == block2->hash (); done |= node0.latest (nano::dev_genesis_key.pub) == block3->hash () && node1.latest (nano::dev_genesis_key.pub) == block3->hash () && node2.latest (nano::dev_genesis_key.pub) == block3->hash (); ASSERT_NO_ERROR (system.poll ()); } } // Sometimes hangs on the bootstrap_initiator.bootstrap call TEST (node, DISABLED_fork_stale) { nano::system system1 (1); system1.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::system system2 (1); auto & node1 (*system1.nodes[0]); auto & node2 (*system2.nodes[0]); node2.bootstrap_initiator.bootstrap (node1.network.endpoint (), false); std::shared_ptr<nano::transport::channel> channel (std::make_shared<nano::transport::channel_udp> (node2.network.udp_channels, node1.network.endpoint (), node2.network_params.protocol.protocol_version)); auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, std::vector<nano::block_hash> ()); node2.rep_crawler.response (channel, vote); nano::genesis genesis; nano::keypair key1; nano::keypair key2; nano::state_block_builder builder; auto send3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Mxrb_ratio) .link (key1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node1.work_generate_blocking (*send3); node1.process_active (send3); system2.deadline_set (10s); while (node2.block (send3->hash ()) == nullptr) { system1.poll (); ASSERT_NO_ERROR (system2.poll ()); } auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send3->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Mxrb_ratio) .link (key1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node1.work_generate_blocking (*send1); auto send2 = builder.make_block () .account 
(nano::dev_genesis_key.pub) .previous (send3->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Mxrb_ratio) .link (key2.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (0) .build_shared (); node1.work_generate_blocking (*send2); { auto transaction1 (node1.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction1, *send1).code); auto transaction2 (node2.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node2.ledger.process (transaction2, *send2).code); } node1.process_active (send1); node1.process_active (send2); node2.process_active (send1); node2.process_active (send2); node2.bootstrap_initiator.bootstrap (node1.network.endpoint (), false); while (node2.block (send1->hash ()) == nullptr) { system1.poll (); ASSERT_NO_ERROR (system2.poll ()); } } TEST (node, broadcast_elected) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, nano::transport::transport_type::udp }; for (auto & type : types) { nano::node_flags node_flags; if (type == nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node0 = system.add_node (node_config, node_flags, type); node_config.peering_port = nano::get_available_port (); auto node1 = system.add_node (node_config, node_flags, type); node_config.peering_port = nano::get_available_port (); auto node2 = system.add_node (node_config, node_flags, type); nano::keypair rep_big; nano::keypair rep_small; nano::keypair rep_other; nano::block_builder builder; { auto transaction0 (node0->store.tx_begin_write ()); auto transaction1 (node1->store.tx_begin_write ()); auto transaction2 (node2->store.tx_begin_write ()); auto fund_big = *builder.send () .previous (nano::genesis_hash) .destination (rep_big.pub) .balance (nano::Gxrb_ratio * 5) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build (); auto open_big = *builder.open () .source (fund_big.hash ()) .representative (rep_big.pub) .account (rep_big.pub) .sign (rep_big.prv, rep_big.pub) .work (*system.work.generate (rep_big.pub)) .build (); auto fund_small = *builder.send () .previous (fund_big.hash ()) .destination (rep_small.pub) .balance (nano::Gxrb_ratio * 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (fund_big.hash ())) .build (); auto open_small = *builder.open () .source (fund_small.hash ()) .representative (rep_small.pub) .account (rep_small.pub) .sign (rep_small.prv, rep_small.pub) .work (*system.work.generate (rep_small.pub)) .build (); auto fund_other = *builder.send () .previous (fund_small.hash ()) .destination (rep_other.pub) .balance (nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (fund_small.hash ())) .build (); auto open_other = *builder.open () .source (fund_other.hash ()) .representative (rep_other.pub) .account (rep_other.pub) .sign (rep_other.prv, rep_other.pub) .work (*system.work.generate (rep_other.pub)) .build (); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, fund_big).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process 
(transaction1, fund_big).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, fund_big).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, open_big).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction1, open_big).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, open_big).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, fund_small).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction1, fund_small).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, fund_small).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, open_small).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction1, open_small).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, open_small).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, fund_other).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction1, fund_other).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, fund_other).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction0, open_other).code); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction1, open_other).code); ASSERT_EQ (nano::process_result::progress, node2->ledger.process (transaction2, open_other).code); } // Confirm blocks to allow voting for (auto & node : system.nodes) { auto block (node->block (node->latest (nano::dev_genesis_key.pub))); ASSERT_NE (nullptr, block); node->block_confirm (block); auto election (node->active.election (block->qualified_root ())); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (5s, 4 == node->ledger.cache.cemented_count) } system.wallet (0)->insert_adhoc (rep_big.prv); system.wallet (1)->insert_adhoc (rep_small.prv); system.wallet (2)->insert_adhoc (rep_other.prv); auto fork0 = builder.send () .previous (node2->latest (nano::dev_genesis_key.pub)) .destination (rep_small.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node0->work_generate_blocking (node2->latest (nano::dev_genesis_key.pub))) .build_shared (); // A copy is necessary to avoid data races during ledger processing, which sets the sideband auto fork0_copy (std::make_shared<nano::send_block> (*fork0)); node0->process_active (fork0); node1->process_active (fork0_copy); auto fork1 = builder.send () .previous (node2->latest (nano::dev_genesis_key.pub)) .destination (rep_big.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node0->work_generate_blocking (node2->latest (nano::dev_genesis_key.pub))) .build_shared (); system.wallet (2)->insert_adhoc (rep_small.prv); node2->process_active (fork1); ASSERT_TIMELY (10s, node0->ledger.block_exists (fork0->hash ()) && node1->ledger.block_exists (fork0->hash ())); system.deadline_set (50s); while (!node2->ledger.block_exists (fork0->hash ())) { auto ec = system.poll (); ASSERT_TRUE (node0->ledger.block_exists (fork0->hash ())); ASSERT_TRUE (node1->ledger.block_exists (fork0->hash ())); ASSERT_NO_ERROR (ec); } ASSERT_TIMELY (5s, node1->stats.count (nano::stat::type::confirmation_observer, nano::stat::detail::inactive_conf_height, nano::stat::dir::out) != 0); } } TEST (node, 
rep_self_vote) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.online_weight_minimum = std::numeric_limits<nano::uint128_t>::max (); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node0 = system.add_node (node_config); nano::keypair rep_big; nano::block_builder builder; auto fund_big = *builder.send () .previous (nano::genesis_hash) .destination (rep_big.pub) .balance (nano::uint128_t{ "0xb0000000000000000000000000000000" }) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build (); auto open_big = *builder.open () .source (fund_big.hash ()) .representative (rep_big.pub) .account (rep_big.pub) .sign (rep_big.prv, rep_big.pub) .work (*system.work.generate (rep_big.pub)) .build (); ASSERT_EQ (nano::process_result::progress, node0->process (fund_big).code); ASSERT_EQ (nano::process_result::progress, node0->process (open_big).code); // Confirm both blocks, allowing voting on the upcoming block node0->block_confirm (node0->block (open_big.hash ())); auto election = node0->active.election (open_big.qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); system.wallet (0)->insert_adhoc (rep_big.prv); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_EQ (system.wallet (0)->wallets.reps ().voting, 2); auto block0 = builder.send () .previous (fund_big.hash ()) .destination (rep_big.pub) .balance (nano::uint128_t ("0x60000000000000000000000000000000")) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (fund_big.hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node0->process (*block0).code); auto & active (node0->active); auto election1 = active.insert (block0); // Wait until representatives are activated & make vote ASSERT_TIMELY (1s, election1.election->votes ().size () == 3); auto rep_votes (election1.election->votes ()); ASSERT_NE (rep_votes.end (), rep_votes.find (nano::dev_genesis_key.pub)); ASSERT_NE (rep_votes.end (), rep_votes.find (rep_big.pub)); } // Bootstrapping shouldn't republish the blocks to the network. TEST (node, DISABLED_bootstrap_no_publish) { nano::system system0 (1); nano::system system1 (1); auto node0 (system0.nodes[0]); auto node1 (system1.nodes[0]); nano::keypair key0; // node0 knows about send0 but node1 doesn't. nano::send_block send0 (node0->latest (nano::dev_genesis_key.pub), key0.pub, 500, nano::dev_genesis_key.prv, nano::dev_genesis_key.pub, 0); { auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, send0).code); } ASSERT_FALSE (node1->bootstrap_initiator.in_progress ()); node1->bootstrap_initiator.bootstrap (node0->network.endpoint (), false); ASSERT_TRUE (node1->active.empty ()); system1.deadline_set (10s); while (node1->block (send0.hash ()) == nullptr) { // Poll until the TCP connection is torn down and in_progress goes false system0.poll (); auto ec = system1.poll (); // There should never be an active transaction because the only activity is bootstrapping 1 block which shouldn't be publishing. 
ASSERT_TRUE (node1->active.empty ()); ASSERT_NO_ERROR (ec); } } // Check that an outgoing bootstrap request can push blocks TEST (node, bootstrap_bulk_push) { nano::system system0; nano::system system1; nano::node_config config0 (nano::get_available_port (), system0.logging); config0.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node0 (system0.add_node (config0)); nano::node_config config1 (nano::get_available_port (), system1.logging); config1.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node1 (system1.add_node (config1)); nano::keypair key0; // node0 knows about send0 but node1 doesn't. auto send0 = nano::send_block_builder () .previous (nano::genesis_hash) .destination (key0.pub) .balance (500) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node0->work_generate_blocking (nano::genesis_hash)) .build_shared (); ASSERT_EQ (nano::process_result::progress, node0->process (*send0).code); ASSERT_FALSE (node0->bootstrap_initiator.in_progress ()); ASSERT_FALSE (node1->bootstrap_initiator.in_progress ()); ASSERT_TRUE (node1->active.empty ()); node0->bootstrap_initiator.bootstrap (node1->network.endpoint (), false); system1.deadline_set (10s); while (node1->block (send0->hash ()) == nullptr) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } // since this uses bulk_push, the new block should be republished system1.deadline_set (10s); while (node1->active.empty ()) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } } // Bootstrapping a forked open block should succeed. TEST (node, bootstrap_fork_open) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); auto node0 = system.add_node (node_config); node_config.peering_port = nano::get_available_port (); auto node1 = system.add_node (node_config); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key0; nano::block_builder builder; auto send0 = *builder.send () .previous (nano::genesis_hash) .destination (key0.pub) .balance (nano::genesis_amount - 500) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build (); auto open0 = *builder.open () .source (send0.hash ()) .representative (1) .account (key0.pub) .sign (key0.prv, key0.pub) .work (*system.work.generate (key0.pub)) .build (); auto open1 = *builder.open () .source (send0.hash ()) .representative (2) .account (key0.pub) .sign (key0.prv, key0.pub) .work (*system.work.generate (key0.pub)) .build (); // Both know about send0 ASSERT_EQ (nano::process_result::progress, node0->process (send0).code); ASSERT_EQ (nano::process_result::progress, node1->process (send0).code); // Confirm send0 to allow starting and voting on the following blocks for (auto node : system.nodes) { node->block_confirm (node->block (node->latest (nano::dev_genesis_key.pub))); auto election = node->active.election (send0.qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (2s, node->active.empty ()); } ASSERT_TIMELY (3s, node0->block_confirmed (send0.hash ())); // They disagree about open0/open1 ASSERT_EQ (nano::process_result::progress, node0->process (open0).code); ASSERT_EQ (nano::process_result::progress, node1->process (open1).code); ASSERT_FALSE (node1->ledger.block_exists (open0.hash ())); ASSERT_FALSE (node1->bootstrap_initiator.in_progress ()); node1->bootstrap_initiator.bootstrap (node0->network.endpoint (), false); 
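// No election is expected here; bootstrapping alone should replace node1's fork (open1) with the confirmed open0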
ASSERT_TRUE (node1->active.empty ()); ASSERT_TIMELY (10s, !node1->ledger.block_exists (open1.hash ()) && node1->ledger.block_exists (open0.hash ())); } // Unconfirmed blocks from bootstrap should be confirmed TEST (node, bootstrap_confirm_frontiers) { nano::system system0 (1); nano::system system1 (1); auto node0 (system0.nodes[0]); auto node1 (system1.nodes[0]); system0.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key0; // node0 knows about send0 but node1 doesn't. auto send0 = nano::send_block_builder () .previous (nano::genesis_hash) .destination (key0.pub) .balance (nano::genesis_amount - 500) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node0->work_generate_blocking (nano::genesis_hash)) .build_shared (); ASSERT_EQ (nano::process_result::progress, node0->process (*send0).code); ASSERT_FALSE (node0->bootstrap_initiator.in_progress ()); ASSERT_FALSE (node1->bootstrap_initiator.in_progress ()); ASSERT_TRUE (node1->active.empty ()); node1->bootstrap_initiator.bootstrap (node0->network.endpoint ()); // Additionally add new peer to confirm bootstrap frontier system1.deadline_set (10s); while (node1->block (send0->hash ()) == nullptr) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } // Wait for election start system1.deadline_set (10s); while (node1->active.empty ()) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } { nano::lock_guard<nano::mutex> guard (node1->active.mutex); auto existing1 (node1->active.blocks.find (send0->hash ())); ASSERT_NE (node1->active.blocks.end (), existing1); } // Wait for confirmation height update system1.deadline_set (10s); bool done (false); while (!done) { { auto transaction (node1->store.tx_begin_read ()); done = node1->ledger.block_confirmed (transaction, send0->hash ()); } ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } } // Test that if we create a block that isn't confirmed, we sync. 
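// node1 holds an unconfirmed local send from key0; the subsequent wallet send should make node0 sync it so the genesis balance is fully restored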
TEST (node, DISABLED_unconfirmed_send) { nano::system system (2); auto & node0 (*system.nodes[0]); auto & node1 (*system.nodes[1]); auto wallet0 (system.wallet (0)); auto wallet1 (system.wallet (1)); nano::keypair key0; wallet1->insert_adhoc (key0.prv); wallet0->insert_adhoc (nano::dev_genesis_key.prv); auto send1 (wallet0->send_action (nano::genesis_account, key0.pub, 2 * nano::Mxrb_ratio)); ASSERT_TIMELY (10s, node1.balance (key0.pub) == 2 * nano::Mxrb_ratio && !node1.bootstrap_initiator.in_progress ()); auto latest (node1.latest (key0.pub)); nano::state_block send2 (key0.pub, latest, nano::genesis_account, nano::Mxrb_ratio, nano::genesis_account, key0.prv, key0.pub, *node0.work_generate_blocking (latest)); { auto transaction (node1.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, send2).code); } auto send3 (wallet1->send_action (key0.pub, nano::genesis_account, nano::Mxrb_ratio)); ASSERT_TIMELY (10s, node0.balance (nano::genesis_account) == nano::genesis_amount); } // Test that nodes can track nodes that have rep weight for priority broadcasting TEST (node, rep_list) { nano::system system (2); auto & node1 (*system.nodes[1]); auto wallet0 (system.wallet (0)); auto wallet1 (system.wallet (1)); // Node0 has a rep wallet0->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; // Broadcast a confirm so others should know this is a rep node wallet0->send_action (nano::dev_genesis_key.pub, key1.pub, nano::Mxrb_ratio); ASSERT_EQ (0, node1.rep_crawler.representatives (1).size ()); system.deadline_set (10s); auto done (false); while (!done) { auto reps (node1.rep_crawler.representatives (1)); if (!reps.empty ()) { if (!reps[0].weight.is_zero ()) { done = true; } } ASSERT_NO_ERROR (system.poll ()); } } TEST (node, rep_weight) { nano::system system; auto add_node = [&system] { auto node = std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work); node->start (); system.nodes.push_back (node); return node; }; auto & node = *add_node (); auto & node1 = *add_node (); auto & node2 = *add_node (); auto & node3 = *add_node (); nano::genesis genesis; nano::keypair keypair1; nano::keypair keypair2; nano::block_builder builder; auto amount_pr (node.minimum_principal_weight () + 100); auto amount_not_pr (node.minimum_principal_weight () - 100); std::shared_ptr<nano::block> block1 = builder .state () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - amount_not_pr) .link (keypair1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build (); std::shared_ptr<nano::block> block2 = builder .state () .account (keypair1.pub) .previous (0) .representative (keypair1.pub) .balance (amount_not_pr) .link (block1->hash ()) .sign (keypair1.prv, keypair1.pub) .work (*system.work.generate (keypair1.pub)) .build (); std::shared_ptr<nano::block> block3 = builder .state () .account (nano::dev_genesis_key.pub) .previous (block1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - amount_not_pr - amount_pr) .link (keypair2.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (block1->hash ())) .build (); std::shared_ptr<nano::block> block4 = builder .state () .account (keypair2.pub) .previous (0) .representative (keypair2.pub) .balance (amount_pr) .link (block3->hash ()) 
.sign (keypair2.prv, keypair2.pub) .work (*system.work.generate (keypair2.pub)) .build (); { auto transaction = node.store.tx_begin_write (); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block1).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block2).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block3).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block4).code); } ASSERT_TRUE (node.rep_crawler.representatives (1).empty ()); std::shared_ptr<nano::transport::channel> channel1 = nano::establish_tcp (system, node, node1.network.endpoint ()); ASSERT_NE (nullptr, channel1); std::shared_ptr<nano::transport::channel> channel2 = nano::establish_tcp (system, node, node2.network.endpoint ()); ASSERT_NE (nullptr, channel2); std::shared_ptr<nano::transport::channel> channel3 = nano::establish_tcp (system, node, node3.network.endpoint ()); ASSERT_NE (nullptr, channel3); auto vote0 = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, genesis.open); auto vote1 = std::make_shared<nano::vote> (keypair1.pub, keypair1.prv, 0, genesis.open); auto vote2 = std::make_shared<nano::vote> (keypair2.pub, keypair2.prv, 0, genesis.open); node.rep_crawler.response (channel1, vote0); node.rep_crawler.response (channel2, vote1); node.rep_crawler.response (channel3, vote2); ASSERT_TIMELY (5s, node.rep_crawler.representative_count () == 2); // Make sure we get the rep with the most weight first auto reps (node.rep_crawler.representatives (1)); ASSERT_EQ (1, reps.size ()); ASSERT_EQ (node.balance (nano::dev_genesis_key.pub), reps[0].weight.number ()); ASSERT_EQ (nano::dev_genesis_key.pub, reps[0].account); ASSERT_EQ (*channel1, reps[0].channel_ref ()); ASSERT_TRUE (node.rep_crawler.is_pr (*channel1)); ASSERT_FALSE (node.rep_crawler.is_pr (*channel2)); ASSERT_TRUE (node.rep_crawler.is_pr (*channel3)); } TEST (node, rep_remove) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto & node = *system.add_node (node_flags); nano::genesis genesis; nano::keypair keypair1; nano::keypair keypair2; nano::block_builder builder; std::shared_ptr<nano::block> block1 = builder .state () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - node.minimum_principal_weight () * 2) .link (keypair1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build (); std::shared_ptr<nano::block> block2 = builder .state () .account (keypair1.pub) .previous (0) .representative (keypair1.pub) .balance (node.minimum_principal_weight () * 2) .link (block1->hash ()) .sign (keypair1.prv, keypair1.pub) .work (*system.work.generate (keypair1.pub)) .build (); std::shared_ptr<nano::block> block3 = builder .state () .account (nano::dev_genesis_key.pub) .previous (block1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - node.minimum_principal_weight () * 4) .link (keypair2.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (block1->hash ())) .build (); std::shared_ptr<nano::block> block4 = builder .state () .account (keypair2.pub) .previous (0) .representative (keypair2.pub) .balance (node.minimum_principal_weight () * 2) .link (block3->hash ()) .sign (keypair2.prv, keypair2.pub) .work (*system.work.generate (keypair2.pub)) 
.build (); { auto transaction = node.store.tx_begin_write (); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block1).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block2).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block3).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *block4).code); } // Add inactive UDP representative channel nano::endpoint endpoint0 (boost::asio::ip::address_v6::loopback (), nano::get_available_port ()); std::shared_ptr<nano::transport::channel> channel0 (std::make_shared<nano::transport::channel_udp> (node.network.udp_channels, endpoint0, node.network_params.protocol.protocol_version)); auto channel_udp = node.network.udp_channels.insert (endpoint0, node.network_params.protocol.protocol_version); auto vote1 = std::make_shared<nano::vote> (keypair1.pub, keypair1.prv, 0, genesis.open); ASSERT_FALSE (node.rep_crawler.response (channel0, vote1)); ASSERT_TIMELY (5s, node.rep_crawler.representative_count () == 1); auto reps (node.rep_crawler.representatives (1)); ASSERT_EQ (1, reps.size ()); ASSERT_EQ (node.minimum_principal_weight () * 2, reps[0].weight.number ()); ASSERT_EQ (keypair1.pub, reps[0].account); ASSERT_EQ (*channel0, reps[0].channel_ref ()); // Modify last_packet_received so the channel is removed faster std::chrono::steady_clock::time_point fake_timepoint{}; node.network.udp_channels.modify (channel_udp, [fake_timepoint](std::shared_ptr<nano::transport::channel_udp> const & channel_a) { channel_a->set_last_packet_received (fake_timepoint); }); // This UDP channel is not reachable and should timeout ASSERT_EQ (1, node.rep_crawler.representative_count ()); ASSERT_TIMELY (10s, node.rep_crawler.representative_count () == 0); // Add working representative auto node1 = system.add_node (nano::node_config (nano::get_available_port (), system.logging)); system.wallet (1)->insert_adhoc (nano::dev_genesis_key.prv); auto channel1 (node.network.find_channel (node1->network.endpoint ())); ASSERT_NE (nullptr, channel1); auto vote2 = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, genesis.open); node.rep_crawler.response (channel1, vote2); ASSERT_TIMELY (10s, node.rep_crawler.representative_count () == 1); // Add inactive TCP representative channel auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::unique_path (), nano::node_config (nano::get_available_port (), system.logging), system.work)); std::weak_ptr<nano::node> node_w (node.shared ()); auto vote3 = std::make_shared<nano::vote> (keypair2.pub, keypair2.prv, 0, genesis.open); node.network.tcp_channels.start_tcp (node2->network.endpoint (), [node_w, &vote3](std::shared_ptr<nano::transport::channel> const & channel2) { if (auto node_l = node_w.lock ()) { ASSERT_FALSE (node_l->rep_crawler.response (channel2, vote3)); } }); ASSERT_TIMELY (10s, node.rep_crawler.representative_count () == 2); node2->stop (); ASSERT_TIMELY (10s, node.rep_crawler.representative_count () == 1); reps = node.rep_crawler.representatives (1); ASSERT_EQ (nano::dev_genesis_key.pub, reps[0].account); ASSERT_EQ (1, node.network.size ()); auto list (node.network.list (1)); ASSERT_EQ (node1->network.endpoint (), list[0]->get_endpoint ()); } TEST (node, rep_connection_close) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); // Add working representative (node 2) system.wallet (1)->insert_adhoc 
(nano::dev_genesis_key.prv); ASSERT_TIMELY (10s, node1.rep_crawler.representative_count () == 1); node2.stop (); // Remove representative with closed channel ASSERT_TIMELY (10s, node1.rep_crawler.representative_count () == 0); } // Test that nodes can disable representative voting TEST (node, no_voting) { nano::system system (1); auto & node0 (*system.nodes[0]); nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; system.add_node (node_config); auto wallet0 (system.wallet (0)); auto wallet1 (system.wallet (1)); // Node1 has a rep wallet1->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; wallet1->insert_adhoc (key1.prv); // Broadcast a confirm so others should know this is a rep node wallet1->send_action (nano::dev_genesis_key.pub, key1.pub, nano::Mxrb_ratio); ASSERT_TIMELY (10s, node0.active.empty ()); ASSERT_EQ (0, node0.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in)); } TEST (node, send_callback) { nano::system system (1); auto & node0 (*system.nodes[0]); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->insert_adhoc (key2.prv); node0.config.callback_address = "localhost"; node0.config.callback_port = 8010; node0.config.callback_target = "/"; ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev_genesis_key.pub, key2.pub, node0.config.receive_minimum.number ())); ASSERT_TIMELY (10s, node0.balance (key2.pub).is_zero ()); ASSERT_EQ (std::numeric_limits<nano::uint128_t>::max () - node0.config.receive_minimum.number (), node0.balance (nano::dev_genesis_key.pub)); } TEST (node, balance_observer) { nano::system system (1); auto & node1 (*system.nodes[0]); std::atomic<int> balances (0); nano::keypair key; node1.observers.account_balance.add ([&key, &balances](nano::account const & account_a, bool is_pending) { if (key.pub == account_a && is_pending) { balances++; } else if (nano::dev_genesis_key.pub == account_a && !is_pending) { balances++; } }); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); system.wallet (0)->send_action (nano::dev_genesis_key.pub, key.pub, 1); system.deadline_set (10s); auto done (false); while (!done) { auto ec = system.poll (); done = balances.load () == 2; ASSERT_NO_ERROR (ec); } } TEST (node, bootstrap_connection_scaling) { nano::system system (1); auto & node1 (*system.nodes[0]); ASSERT_EQ (34, node1.bootstrap_initiator.connections->target_connections (5000, 1)); ASSERT_EQ (4, node1.bootstrap_initiator.connections->target_connections (0, 1)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (50000, 1)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (10000000000, 1)); ASSERT_EQ (32, node1.bootstrap_initiator.connections->target_connections (5000, 0)); ASSERT_EQ (1, node1.bootstrap_initiator.connections->target_connections (0, 0)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (50000, 0)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (10000000000, 0)); ASSERT_EQ (36, node1.bootstrap_initiator.connections->target_connections (5000, 2)); ASSERT_EQ (8, node1.bootstrap_initiator.connections->target_connections (0, 2)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (50000, 2)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (10000000000, 2)); node1.config.bootstrap_connections = 128; ASSERT_EQ (64, 
node1.bootstrap_initiator.connections->target_connections (0, 1)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (50000, 1)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (0, 2)); ASSERT_EQ (64, node1.bootstrap_initiator.connections->target_connections (50000, 2)); node1.config.bootstrap_connections_max = 256; ASSERT_EQ (128, node1.bootstrap_initiator.connections->target_connections (0, 1)); ASSERT_EQ (256, node1.bootstrap_initiator.connections->target_connections (50000, 1)); ASSERT_EQ (256, node1.bootstrap_initiator.connections->target_connections (0, 2)); ASSERT_EQ (256, node1.bootstrap_initiator.connections->target_connections (50000, 2)); node1.config.bootstrap_connections_max = 0; ASSERT_EQ (1, node1.bootstrap_initiator.connections->target_connections (0, 1)); ASSERT_EQ (1, node1.bootstrap_initiator.connections->target_connections (50000, 1)); } // Test stat counting at both type and detail levels TEST (node, stat_counting) { nano::system system (1); auto & node1 (*system.nodes[0]); node1.stats.add (nano::stat::type::ledger, nano::stat::dir::in, 1); node1.stats.add (nano::stat::type::ledger, nano::stat::dir::in, 5); node1.stats.inc (nano::stat::type::ledger, nano::stat::dir::in); node1.stats.inc (nano::stat::type::ledger, nano::stat::detail::send, nano::stat::dir::in); node1.stats.inc (nano::stat::type::ledger, nano::stat::detail::send, nano::stat::dir::in); node1.stats.inc (nano::stat::type::ledger, nano::stat::detail::receive, nano::stat::dir::in); ASSERT_EQ (10, node1.stats.count (nano::stat::type::ledger, nano::stat::dir::in)); ASSERT_EQ (2, node1.stats.count (nano::stat::type::ledger, nano::stat::detail::send, nano::stat::dir::in)); ASSERT_EQ (1, node1.stats.count (nano::stat::type::ledger, nano::stat::detail::receive, nano::stat::dir::in)); node1.stats.add (nano::stat::type::ledger, nano::stat::dir::in, 0); ASSERT_EQ (10, node1.stats.count (nano::stat::type::ledger, nano::stat::dir::in)); } TEST (node, stat_histogram) { nano::system system (1); auto & node1 (*system.nodes[0]); // Specific bins node1.stats.define_histogram (nano::stat::type::vote, nano::stat::detail::confirm_req, nano::stat::dir::in, { 1, 6, 10, 16 }); node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_req, nano::stat::dir::in, 1, 50); auto histogram_req (node1.stats.get_histogram (nano::stat::type::vote, nano::stat::detail::confirm_req, nano::stat::dir::in)); ASSERT_EQ (histogram_req->get_bins ()[0].value, 50); // Uniform distribution (12 bins, width 1); also test clamping 100 to the last bin node1.stats.define_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::in, { 1, 13 }, 12); node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::in, 1); node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::in, 8, 10); node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::in, 100); auto histogram_ack (node1.stats.get_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::in)); ASSERT_EQ (histogram_ack->get_bins ()[0].value, 1); ASSERT_EQ (histogram_ack->get_bins ()[7].value, 10); ASSERT_EQ (histogram_ack->get_bins ()[11].value, 1); // Uniform distribution (2 bins, width 5); add 1 to each bin node1.stats.define_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::out, { 1, 11 }, 2); 
node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::out, 1, 1); node1.stats.update_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::out, 6, 1); auto histogram_ack_out (node1.stats.get_histogram (nano::stat::type::vote, nano::stat::detail::confirm_ack, nano::stat::dir::out)); ASSERT_EQ (histogram_ack_out->get_bins ()[0].value, 1); ASSERT_EQ (histogram_ack_out->get_bins ()[1].value, 1); } TEST (node, online_reps) { nano::system system (1); auto & node1 (*system.nodes[0]); // 1 sample of minimum weight ASSERT_EQ (node1.config.online_weight_minimum, node1.online_reps.trended ()); auto vote (std::make_shared<nano::vote> ()); ASSERT_EQ (0, node1.online_reps.online ()); node1.online_reps.observe (nano::dev_genesis_key.pub); ASSERT_EQ (nano::genesis_amount, node1.online_reps.online ()); // 1 minimum, 1 maximum ASSERT_EQ (node1.config.online_weight_minimum, node1.online_reps.trended ()); node1.online_reps.sample (); ASSERT_EQ (nano::genesis_amount, node1.online_reps.trended ()); node1.online_reps.clear (); // 2 minimum, 1 maximum node1.online_reps.sample (); ASSERT_EQ (node1.config.online_weight_minimum, node1.online_reps.trended ()); } namespace nano { TEST (node, online_reps_rep_crawler) { nano::system system; nano::node_flags flags; flags.disable_rep_crawler = true; auto & node1 = *system.add_node (flags); auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, nano::milliseconds_since_epoch (), std::vector<nano::block_hash>{ nano::genesis_hash }); ASSERT_EQ (0, node1.online_reps.online ()); // Without rep crawler node1.vote_processor.vote_blocking (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (0, node1.online_reps.online ()); // After inserting to rep crawler { nano::lock_guard<nano::mutex> guard (node1.rep_crawler.probable_reps_mutex); node1.rep_crawler.active.insert (nano::genesis_hash); } node1.vote_processor.vote_blocking (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (nano::genesis_amount, node1.online_reps.online ()); } } TEST (node, online_reps_election) { nano::system system; nano::node_flags flags; flags.disable_rep_crawler = true; auto & node1 = *system.add_node (flags); // Start election nano::genesis genesis; nano::keypair key; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (genesis.hash ())) .build_shared (); node1.process_active (send1); node1.block_processor.flush (); ASSERT_EQ (1, node1.active.size ()); // Process vote for ongoing election auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, nano::milliseconds_since_epoch (), std::vector<nano::block_hash>{ send1->hash () }); ASSERT_EQ (0, node1.online_reps.online ()); node1.vote_processor.vote_blocking (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (nano::genesis_amount - nano::Gxrb_ratio, node1.online_reps.online ()); } TEST (node, block_confirm) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, nano::transport::transport_type::udp }; for (auto & type : types) { nano::node_flags node_flags; if (type == 
nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::system system (2, type, node_flags); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); nano::genesis genesis; nano::keypair key; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (genesis.hash ())) .build_shared (); // A copy is necessary to avoid data races during ledger processing, which sets the sideband auto send1_copy = builder.make_block () .from (*send1) .build_shared (); node1.block_processor.add (send1, nano::seconds_since_epoch ()); node2.block_processor.add (send1_copy, nano::seconds_since_epoch ()); ASSERT_TIMELY (5s, node1.ledger.block_exists (send1->hash ()) && node2.ledger.block_exists (send1_copy->hash ())); ASSERT_TRUE (node1.ledger.block_exists (send1->hash ())); ASSERT_TRUE (node2.ledger.block_exists (send1_copy->hash ())); // Confirm send1 on node2 so it can vote for send2 node2.block_confirm (send1_copy); auto election = node2.active.election (send1_copy->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (3s, node2.block_confirmed (send1_copy->hash ()) && node2.active.empty ()); system.wallet (1)->insert_adhoc (nano::dev_genesis_key.prv); auto send2 (std::make_shared<nano::state_block> (nano::dev_genesis_key.pub, send1->hash (), nano::dev_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio * 2, key.pub, nano::dev_genesis_key.prv, nano::dev_genesis_key.pub, *node1.work_generate_blocking (send1->hash ()))); { auto transaction (node1.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send2).code); } { auto transaction (node2.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node2.ledger.process (transaction, *send2).code); } ASSERT_TRUE (node1.active.list_recently_cemented ().empty ()); node1.block_confirm (send2); ASSERT_TIMELY (10s, node1.active.list_recently_cemented ().size () == 2); } } TEST (node, block_arrival) { nano::system system (1); auto & node (*system.nodes[0]); ASSERT_EQ (0, node.block_arrival.arrival.size ()); nano::block_hash hash1 (1); node.block_arrival.add (hash1); ASSERT_EQ (1, node.block_arrival.arrival.size ()); node.block_arrival.add (hash1); ASSERT_EQ (1, node.block_arrival.arrival.size ()); nano::block_hash hash2 (2); node.block_arrival.add (hash2); ASSERT_EQ (2, node.block_arrival.arrival.size ()); } TEST (node, block_arrival_size) { nano::system system (1); auto & node (*system.nodes[0]); auto time (std::chrono::steady_clock::now () - nano::block_arrival::arrival_time_min - std::chrono::seconds (5)); nano::block_hash hash (0); for (auto i (0); i < nano::block_arrival::arrival_size_min * 2; ++i) { node.block_arrival.arrival.push_back (nano::block_arrival_info{ time, hash }); ++hash.qwords[0]; } ASSERT_EQ (nano::block_arrival::arrival_size_min * 2, node.block_arrival.arrival.size ()); node.block_arrival.recent (0); ASSERT_EQ (nano::block_arrival::arrival_size_min, node.block_arrival.arrival.size ()); } TEST (node, block_arrival_time) { nano::system system (1); auto & node (*system.nodes[0]); auto time (std::chrono::steady_clock::now ()); nano::block_hash hash 
(0); for (auto i (0); i < nano::block_arrival::arrival_size_min * 2; ++i) { node.block_arrival.arrival.push_back (nano::block_arrival_info{ time, hash }); ++hash.qwords[0]; } ASSERT_EQ (nano::block_arrival::arrival_size_min * 2, node.block_arrival.arrival.size ()); node.block_arrival.recent (0); ASSERT_EQ (nano::block_arrival::arrival_size_min * 2, node.block_arrival.arrival.size ()); } TEST (node, confirm_quorum) { nano::system system (1); auto & node1 (*system.nodes[0]); nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); // Put greater than node.delta () in pending so quorum can't be reached nano::amount new_balance (node1.online_reps.delta () - nano::Gxrb_ratio); auto send1 = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (new_balance) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (genesis.hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node1.process (*send1).code); system.wallet (0)->send_action (nano::dev_genesis_key.pub, nano::dev_genesis_key.pub, new_balance.number ()); ASSERT_TIMELY (10s, !node1.active.empty ()); auto election (node1.active.election (nano::qualified_root (send1->hash (), send1->hash ()))); ASSERT_NE (nullptr, election); ASSERT_FALSE (election->confirmed ()); ASSERT_EQ (1, election->votes ().size ()); ASSERT_EQ (0, node1.balance (nano::dev_genesis_key.pub)); } TEST (node, local_votes_cache) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; node_config.receive_minimum = nano::genesis_amount; auto & node (*system.add_node (node_config)); nano::genesis genesis; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send1->hash ())) .build_shared (); auto send3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send2->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 3 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send2->hash ())) .build_shared (); { auto transaction (node.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *send1).code); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *send2).code); } // Confirm blocks to allow voting node.block_confirm (send2); auto election = node.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (3s, node.ledger.cache.cemented_count == 3); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); 
nano::confirm_req message1 (send1); nano::confirm_req message2 (send2); auto channel (node.network.udp_channels.create (node.network.endpoint ())); node.network.process_message (message1, channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 1); node.network.process_message (message2, channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 2); for (auto i (0); i < 100; ++i) { node.network.process_message (message1, channel); node.network.process_message (message2, channel); } for (int i = 0; i < 4; ++i) { ASSERT_NO_ERROR (system.poll (node.aggregator.max_delay)); } // Make sure a new vote was not generated ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 2); // Max cache { auto transaction (node.store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node.ledger.process (transaction, *send3).code); } nano::confirm_req message3 (send3); for (auto i (0); i < 100; ++i) { node.network.process_message (message3, channel); } for (int i = 0; i < 4; ++i) { ASSERT_NO_ERROR (system.poll (node.aggregator.max_delay)); } ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 3); ASSERT_FALSE (node.history.votes (send1->root (), send1->hash ()).empty ()); ASSERT_FALSE (node.history.votes (send2->root (), send2->hash ()).empty ()); ASSERT_FALSE (node.history.votes (send3->root (), send3->hash ()).empty ()); } TEST (node, local_votes_cache_batch) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node (*system.add_node (node_config)); ASSERT_GE (node.network_params.voting.max_cache, 2); nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; auto send1 = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (key1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *send1).code); node.confirmation_height_processor.add (send1); ASSERT_TIMELY (5s, node.ledger.block_confirmed (node.store.tx_begin_read (), send1->hash ())); auto send2 = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send1->hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *send2).code); auto receive1 = nano::state_block_builder () .account (key1.pub) .previous (0) .representative (nano::dev_genesis_key.pub) .balance (nano::Gxrb_ratio) .link (send1->hash ()) .sign (key1.prv, key1.pub) .work (*node.work_generate_blocking (key1.pub)) .build_shared (); ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *receive1).code); std::vector<std::pair<nano::block_hash, nano::root>> batch{ { send2->hash 
(), send2->root () }, { receive1->hash (), receive1->root () } }; nano::confirm_req message (batch); auto channel (node.network.udp_channels.create (node.network.endpoint ())); // Generates and sends one vote for both hashes which is then cached node.network.process_message (message, channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out) == 1); ASSERT_EQ (1, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); ASSERT_FALSE (node.history.votes (send2->root (), send2->hash ()).empty ()); ASSERT_FALSE (node.history.votes (receive1->root (), receive1->hash ()).empty ()); // Only one confirm_ack should be sent if all hashes are part of the same vote node.network.process_message (message, channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out) == 2); ASSERT_EQ (2, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); // Test when votes are different node.history.erase (send2->root ()); node.history.erase (receive1->root ()); node.network.process_message (nano::confirm_req (send2->hash (), send2->root ()), channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out) == 3); ASSERT_EQ (3, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); node.network.process_message (nano::confirm_req (receive1->hash (), receive1->root ()), channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out) == 4); ASSERT_EQ (4, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); // There are two different votes, so both should be sent in response node.network.process_message (message, channel); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out) == 6); ASSERT_EQ (6, node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); } TEST (node, local_votes_cache_generate_new_vote) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node (*system.add_node (node_config)); nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); // Respond with cached vote nano::confirm_req message1 (genesis.open); auto channel (node.network.udp_channels.create (node.network.endpoint ())); node.network.process_message (message1, channel); ASSERT_TIMELY (3s, !node.history.votes (genesis.open->root (), genesis.open->hash ()).empty ()); auto votes1 (node.history.votes (genesis.open->root (), genesis.open->hash ())); ASSERT_EQ (1, votes1.size ()); ASSERT_EQ (1, votes1[0]->blocks.size ()); ASSERT_EQ (genesis.open->hash (), boost::get<nano::block_hash> (votes1[0]->blocks[0])); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 1); auto send1 = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work
(*node.work_generate_blocking (genesis.hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node.process (*send1).code); // One of the hashes is cached std::vector<std::pair<nano::block_hash, nano::root>> roots_hashes{ std::make_pair (genesis.open->hash (), genesis.open->root ()), std::make_pair (send1->hash (), send1->root ()) }; nano::confirm_req message2 (roots_hashes); node.network.process_message (message2, channel); ASSERT_TIMELY (3s, !node.history.votes (send1->root (), send1->hash ()).empty ()); auto votes2 (node.history.votes (send1->root (), send1->hash ())); ASSERT_EQ (1, votes2.size ()); ASSERT_EQ (1, votes2[0]->blocks.size ()); ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::requests, nano::stat::detail::requests_generated_votes) == 2); ASSERT_FALSE (node.history.votes (genesis.open->root (), genesis.open->hash ()).empty ()); ASSERT_FALSE (node.history.votes (send1->root (), send1->hash ()).empty ()); // First generated + again cached + new generated ASSERT_TIMELY (3s, 3 == node.stats.count (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out)); } TEST (node, local_votes_cache_fork) { nano::system system; nano::node_flags node_flags; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_bootstrap_bulk_pull_server = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_lazy_bootstrap = true; node_flags.disable_legacy_bootstrap = true; node_flags.disable_wallet_bootstrap = true; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node1 (*system.add_node (node_config, node_flags)); nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); auto send1 = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (genesis.hash ())) .build_shared (); auto send1_fork = nano::state_block_builder () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (genesis.hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node1.process (*send1).code); // Cache vote auto vote (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, std::vector<nano::block_hash> (1, send1->hash ()))); node1.vote_processor.vote (vote, std::make_shared<nano::transport::channel_loopback> (node1)); node1.history.add (send1->root (), send1->hash (), vote); auto votes2 (node1.history.votes (send1->root (), send1->hash ())); ASSERT_EQ (1, votes2.size ()); ASSERT_EQ (1, votes2[0]->blocks.size ()); // Start election for forked block node_config.peering_port = nano::get_available_port (); auto & node2 (*system.add_node (node_config, node_flags)); node2.process_active (send1_fork); node2.block_processor.flush (); ASSERT_TIMELY (5s, node2.ledger.block_exists (send1->hash ())); } TEST (node, vote_republish) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); nano::keypair key2; system.wallet (1)->insert_adhoc 
(key2.prv); nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number () * 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); node1.process_active (send1); ASSERT_TIMELY (5s, node2.block (send1->hash ())); node1.active.publish (send2); auto vote (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, std::numeric_limits<uint64_t>::max (), send2)); ASSERT_TRUE (node1.active.active (*send1)); ASSERT_TRUE (node2.active.active (*send1)); node1.vote_processor.vote (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_TIMELY (10s, node1.block (send2->hash ())); ASSERT_TIMELY (10s, node2.block (send2->hash ())); ASSERT_FALSE (node1.block (send1->hash ())); ASSERT_FALSE (node2.block (send1->hash ())); ASSERT_TIMELY (10s, node2.balance (key2.pub) == node1.config.receive_minimum.number () * 2); ASSERT_TIMELY (10s, node1.balance (key2.pub) == node1.config.receive_minimum.number () * 2); } TEST (node, vote_by_hash_bundle) { // Keep max_hashes above system to ensure it is kept in scope as votes can be added during system destruction std::atomic<size_t> max_hashes{ 0 }; nano::system system (1); auto & node = *system.nodes[0]; nano::state_block_builder builder; std::vector<std::shared_ptr<nano::state_block>> blocks; auto block = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (nano::genesis_hash) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 1) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build_shared (); blocks.push_back (block); ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *blocks.back ()).code); for (auto i = 2; i < 200; ++i) { auto block = builder.make_block () .from (*blocks.back ()) .previous (blocks.back ()->hash ()) .balance (nano::genesis_amount - i) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (blocks.back ()->hash ())) .build_shared (); blocks.push_back (block); ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *blocks.back ()).code); } auto election_insertion_result = node.active.insert (blocks.back ()); ASSERT_TRUE (election_insertion_result.inserted); ASSERT_NE (nullptr, election_insertion_result.election); election_insertion_result.election->force_confirm (); system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::keypair key1; system.wallet (0)->insert_adhoc (key1.prv); system.nodes[0]->observers.vote.add ([&max_hashes](std::shared_ptr<nano::vote> const & vote_a, std::shared_ptr<nano::transport::channel> const &, nano::vote_code) { if (vote_a->blocks.size () > max_hashes) { max_hashes = vote_a->blocks.size (); } }); for (auto const & block : blocks) { system.nodes[0]->active.generator.add (block->root (), block->hash ()); } // Verify that bundling occurs. 
While reaching 12 should be common on most hardware in release mode, // we set this low enough to allow the test to pass on CI/with sanitizers. ASSERT_TIMELY (20s, max_hashes.load () >= 3); } TEST (node, vote_by_hash_republish) { nano::system system{ 2 }; auto & node1 = *system.nodes[0]; auto & node2 = *system.nodes[1]; nano::keypair key2; system.wallet (1)->insert_adhoc (key2.prv); nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number () * 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); node1.process_active (send1); ASSERT_TIMELY (5s, node2.active.active (*send1)); node1.process_active (send2); std::vector<nano::block_hash> vote_blocks; vote_blocks.push_back (send2->hash ()); auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, std::numeric_limits<uint64_t>::max (), vote_blocks); ASSERT_TRUE (node1.active.active (*send1)); ASSERT_TRUE (node2.active.active (*send1)); node1.vote_processor.vote (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_TIMELY (10s, node1.block (send2->hash ())); ASSERT_TIMELY (10s, node2.block (send2->hash ())); ASSERT_FALSE (node1.block (send1->hash ())); ASSERT_FALSE (node2.block (send1->hash ())); ASSERT_TIMELY (5s, node2.balance (key2.pub) == node1.config.receive_minimum.number () * 2); ASSERT_TIMELY (10s, node1.balance (key2.pub) == node1.config.receive_minimum.number () * 2); } TEST (node, vote_by_hash_epoch_block_republish) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); nano::keypair key2; system.wallet (1)->insert_adhoc (key2.prv); nano::genesis genesis; auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto epoch1 = nano::state_block_builder () .account (nano::genesis_account) .previous (genesis.hash ()) .representative (nano::genesis_account) .balance (nano::genesis_amount) .link (node1.ledger.epoch_link (nano::epoch::epoch_1)) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); node1.process_active (send1); ASSERT_TIMELY (5s, node2.active.active (*send1)); node1.active.publish (epoch1); std::vector<nano::block_hash> vote_blocks; vote_blocks.push_back (epoch1->hash ()); auto vote (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, vote_blocks)); ASSERT_TRUE (node1.active.active (*send1)); ASSERT_TRUE (node2.active.active (*send1)); node1.vote_processor.vote (vote, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_TIMELY (10s, node1.block (epoch1->hash ())); ASSERT_TIMELY (10s, node2.block (epoch1->hash ())); ASSERT_FALSE (node1.block (send1->hash ())); ASSERT_FALSE (node2.block
(send1->hash ())); } TEST (node, epoch_conflict_confirm) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node0 = system.add_node (node_config); node_config.peering_port = nano::get_available_port (); auto node1 = system.add_node (node_config); nano::keypair key; nano::genesis genesis; nano::keypair epoch_signer (nano::dev_genesis_key); nano::state_block_builder builder; auto send = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 1) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto open = builder.make_block () .account (key.pub) .previous (0) .representative (key.pub) .balance (1) .link (send->hash ()) .sign (key.prv, key.pub) .work (*system.work.generate (key.pub)) .build_shared (); auto change = builder.make_block () .account (key.pub) .previous (open->hash ()) .representative (key.pub) .balance (1) .link (0) .sign (key.prv, key.pub) .work (*system.work.generate (open->hash ())) .build_shared (); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2) .link (open->hash ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send->hash ())) .build_shared (); auto epoch_open = builder.make_block () .account (change->root ().as_account ()) .previous (0) .representative (0) .balance (0) .link (node0->ledger.epoch_link (nano::epoch::epoch_1)) .sign (epoch_signer.prv, epoch_signer.pub) .work (*system.work.generate (open->hash ())) .build_shared (); ASSERT_EQ (nano::process_result::progress, node1->process (*send).code); ASSERT_EQ (nano::process_result::progress, node1->process (*send2).code); ASSERT_EQ (nano::process_result::progress, node1->process (*open).code); // Confirm block in node1 to allow generating votes node1->block_confirm (open); auto election (node1->active.election (open->qualified_root ())); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (3s, node1->block_confirmed (open->hash ())); ASSERT_EQ (nano::process_result::progress, node0->process (*send).code); ASSERT_EQ (nano::process_result::progress, node0->process (*send2).code); ASSERT_EQ (nano::process_result::progress, node0->process (*open).code); node0->process_active (change); node0->process_active (epoch_open); ASSERT_TIMELY (10s, node0->block (change->hash ()) && node0->block (epoch_open->hash ()) && node1->block (change->hash ()) && node1->block (epoch_open->hash ())); // Confirm blocks in node1 to allow generating votes nano::blocks_confirm (*node1, { change, epoch_open }, true /* forced */); ASSERT_TIMELY (3s, node1->block_confirmed (change->hash ()) && node1->block_confirmed (epoch_open->hash ())); // Start elections for node0 nano::blocks_confirm (*node0, { change, epoch_open }); ASSERT_EQ (2, node0->active.size ()); { nano::lock_guard<nano::mutex> lock (node0->active.mutex); ASSERT_TRUE (node0->active.blocks.find (change->hash ()) != node0->active.blocks.end ()); ASSERT_TRUE (node0->active.blocks.find (epoch_open->hash ()) != node0->active.blocks.end ()); } system.wallet (1)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_TIMELY (5s, node0->active.election (change->qualified_root ()) 
== nullptr); ASSERT_TIMELY (5s, node0->active.empty ()); { auto transaction (node0->store.tx_begin_read ()); ASSERT_TRUE (node0->ledger.store.block_exists (transaction, change->hash ())); ASSERT_TRUE (node0->ledger.store.block_exists (transaction, epoch_open->hash ())); } } TEST (node, fork_invalid_block_signature) { nano::system system; nano::node_flags node_flags; // Disabling republishing + waiting for a rollback before sending the correct vote below fixes an intermittent failure in this test // If these are taken out, one of two things may cause the test to fail often: // - Block *send2* might get processed before the rollback happens, simply due to timings, with code "fork", and not be processed again. Waiting for the rollback fixes this issue. // - Block *send1* might get processed again after the rollback happens, which causes *send2* to be processed with code "fork". Disabling block republishing ensures "send1" is not processed again. // An alternative would be to repeatedly flood the correct vote node_flags.disable_block_processor_republishing = true; auto & node1 (*system.add_node (node_flags)); auto & node2 (*system.add_node (node_flags)); nano::keypair key2; nano::genesis genesis; nano::send_block_builder builder; auto send1 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .previous (genesis.hash ()) .destination (key2.pub) .balance (std::numeric_limits<nano::uint128_t>::max () - node1.config.receive_minimum.number () * 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2_corrupt (std::make_shared<nano::send_block> (*send2)); send2_corrupt->signature = nano::signature (123); auto vote (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, send2)); auto vote_corrupt (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, send2_corrupt)); node1.process_active (send1); ASSERT_TIMELY (5s, node1.block (send1->hash ())); // Send the vote with the corrupt block signature node2.network.flood_vote (vote_corrupt, 1.0f); // Wait for the rollback ASSERT_TIMELY (5s, node1.stats.count (nano::stat::type::rollback, nano::stat::detail::all)); // Send the vote with the correct block node2.network.flood_vote (vote, 1.0f); ASSERT_TIMELY (10s, !node1.block (send1->hash ())); ASSERT_TIMELY (10s, node1.block (send2->hash ())); ASSERT_EQ (node1.block (send2->hash ())->block_signature (), send2->block_signature ()); } TEST (node, fork_election_invalid_block_signature) { nano::system system (1); auto & node1 (*system.nodes[0]); nano::genesis genesis; nano::block_builder builder; auto send1 = builder.state () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .build_shared (); auto send2 = builder.state () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .work
(*system.work.generate (genesis.hash ())) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .build_shared (); auto send3 = builder.state () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .sign (nano::dev_genesis_key.prv, 0) // Invalid signature .build_shared (); auto channel1 (node1.network.udp_channels.create (node1.network.endpoint ())); node1.network.process_message (nano::publish (send1), channel1); ASSERT_TIMELY (5s, node1.active.active (send1->qualified_root ())); auto election (node1.active.election (send1->qualified_root ())); ASSERT_NE (nullptr, election); ASSERT_EQ (1, election->blocks ().size ()); node1.network.process_message (nano::publish (send3), channel1); node1.network.process_message (nano::publish (send2), channel1); ASSERT_TIMELY (3s, election->blocks ().size () > 1); ASSERT_EQ (election->blocks ()[send2->hash ()]->block_signature (), send2->block_signature ()); } TEST (node, block_processor_signatures) { nano::system system0 (1); auto & node1 (*system0.nodes[0]); system0.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); nano::block_hash latest (system0.nodes[0]->latest (nano::dev_genesis_key.pub)); nano::state_block_builder builder; nano::keypair key1; nano::keypair key2; nano::keypair key3; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (latest) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (key1.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (latest)) .build_shared (); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (key2.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (send1->hash ())) .build_shared (); auto send3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send2->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 3 * nano::Gxrb_ratio) .link (key3.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (send2->hash ())) .build_shared (); // Invalid signature bit auto send4 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send3->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 4 * nano::Gxrb_ratio) .link (key3.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (send3->hash ())) .build_shared (); send4->signature.bytes[32] ^= 0x1; // Invalid signature bit (force) auto send5 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send3->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 5 * nano::Gxrb_ratio) .link (key3.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (send3->hash ())) .build_shared (); send5->signature.bytes[31] ^= 0x1; // Invalid signature to unchecked { auto transaction (node1.store.tx_begin_write ()); node1.store.unchecked_put (transaction, send5->previous (), send5); } auto receive1 = builder.make_block () .account (key1.pub) .previous (0) .representative 
(nano::dev_genesis_key.pub) .balance (nano::Gxrb_ratio) .link (send1->hash ()) .sign (key1.prv, key1.pub) .work (*node1.work_generate_blocking (key1.pub)) .build_shared (); auto receive2 = builder.make_block () .account (key2.pub) .previous (0) .representative (nano::dev_genesis_key.pub) .balance (nano::Gxrb_ratio) .link (send2->hash ()) .sign (key2.prv, key2.pub) .work (*node1.work_generate_blocking (key2.pub)) .build_shared (); // Invalid private key auto receive3 = builder.make_block () .account (key3.pub) .previous (0) .representative (nano::dev_genesis_key.pub) .balance (nano::Gxrb_ratio) .link (send3->hash ()) .sign (key2.prv, key3.pub) .work (*node1.work_generate_blocking (key3.pub)) .build_shared (); node1.process_active (send1); node1.process_active (send2); node1.process_active (send3); node1.process_active (send4); node1.process_active (receive1); node1.process_active (receive2); node1.process_active (receive3); node1.block_processor.flush (); node1.block_processor.force (send5); node1.block_processor.flush (); auto transaction (node1.store.tx_begin_read ()); ASSERT_TRUE (node1.store.block_exists (transaction, send1->hash ())); ASSERT_TRUE (node1.store.block_exists (transaction, send2->hash ())); ASSERT_TRUE (node1.store.block_exists (transaction, send3->hash ())); ASSERT_FALSE (node1.store.block_exists (transaction, send4->hash ())); ASSERT_FALSE (node1.store.block_exists (transaction, send5->hash ())); ASSERT_TRUE (node1.store.block_exists (transaction, receive1->hash ())); ASSERT_TRUE (node1.store.block_exists (transaction, receive2->hash ())); ASSERT_FALSE (node1.store.block_exists (transaction, receive3->hash ())); } /* * State blocks go through a different signature path, ensure invalidly signed state blocks are rejected * This test can freeze if the wake conditions in block_processor::flush are off, for that reason this is done async here */ TEST (node, block_processor_reject_state) { nano::system system (1); auto & node (*system.nodes[0]); nano::genesis genesis; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); send1->signature.bytes[0] ^= 1; ASSERT_FALSE (node.ledger.block_exists (send1->hash ())); node.process_active (send1); auto flushed = std::async (std::launch::async, [&node] { node.block_processor.flush (); }); ASSERT_NE (std::future_status::timeout, flushed.wait_for (5s)); ASSERT_FALSE (node.ledger.block_exists (send1->hash ())); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); node.process_active (send2); auto flushed2 = std::async (std::launch::async, [&node] { node.block_processor.flush (); }); ASSERT_NE (std::future_status::timeout, flushed2.wait_for (5s)); ASSERT_TRUE (node.ledger.block_exists (send2->hash ())); } TEST (node, block_processor_full) { nano::system system; nano::node_flags node_flags; node_flags.force_use_write_database_queue = true; node_flags.block_processor_full_size = 3; auto & node = 
*system.add_node (nano::node_config (nano::get_available_port (), system.logging), node_flags); nano::genesis genesis; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send1->hash ())) .build_shared (); auto send3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send2->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 3 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send2->hash ())) .build_shared (); // The write guard prevents block processor doing any writes auto write_guard = node.write_database_queue.wait (nano::writer::testing); node.block_processor.add (send1); ASSERT_FALSE (node.block_processor.full ()); node.block_processor.add (send2); ASSERT_FALSE (node.block_processor.full ()); node.block_processor.add (send3); // Block processor may not be full during state blocks signatures verification ASSERT_TIMELY (2s, node.block_processor.full ()); } TEST (node, block_processor_half_full) { nano::system system; nano::node_flags node_flags; node_flags.block_processor_full_size = 6; node_flags.force_use_write_database_queue = true; auto & node = *system.add_node (nano::node_config (nano::get_available_port (), system.logging), node_flags); nano::genesis genesis; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (genesis.hash ())) .build_shared (); auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send1->hash ())) .build_shared (); auto send3 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send2->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 3 * nano::Gxrb_ratio) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node.work_generate_blocking (send2->hash ())) .build_shared (); // The write guard prevents block processor doing any writes auto write_guard = node.write_database_queue.wait (nano::writer::testing); node.block_processor.add (send1); ASSERT_FALSE (node.block_processor.half_full ()); node.block_processor.add (send2); ASSERT_FALSE (node.block_processor.half_full ()); node.block_processor.add (send3); // Block processor may not be half_full during state blocks
signatures verification ASSERT_TIMELY (2s, node.block_processor.half_full ()); ASSERT_FALSE (node.block_processor.full ()); } TEST (node, confirm_back) { nano::system system (1); nano::keypair key; auto & node (*system.nodes[0]); nano::genesis genesis; auto genesis_start_balance (node.balance (nano::dev_genesis_key.pub)); auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key.pub) .balance (genesis_start_balance - 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); nano::state_block_builder builder; auto open = builder.make_block () .account (key.pub) .previous (0) .representative (key.pub) .balance (1) .link (send1->hash ()) .sign (key.prv, key.pub) .work (*system.work.generate (key.pub)) .build_shared (); auto send2 = builder.make_block () .account (key.pub) .previous (open->hash ()) .representative (key.pub) .balance (0) .link (nano::dev_genesis_key.pub) .sign (key.prv, key.pub) .work (*system.work.generate (open->hash ())) .build_shared (); node.process_active (send1); node.process_active (open); node.process_active (send2); nano::blocks_confirm (node, { send1, open, send2 }); ASSERT_EQ (3, node.active.size ()); std::vector<nano::block_hash> vote_blocks; vote_blocks.push_back (send2->hash ()); auto vote (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, std::numeric_limits<uint64_t>::max (), vote_blocks)); node.vote_processor.vote_blocking (vote, std::make_shared<nano::transport::channel_loopback> (node)); ASSERT_TIMELY (10s, node.active.empty ()); } TEST (node, peers) { nano::system system (1); auto node1 (system.nodes[0]); ASSERT_TRUE (node1->network.empty ()); auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); system.nodes.push_back (node2); auto endpoint = node1->network.endpoint (); nano::endpoint_key endpoint_key{ endpoint.address ().to_v6 ().to_bytes (), endpoint.port () }; auto & store = node2->store; { // Add a peer to the database auto transaction (store.tx_begin_write ()); store.peer_put (transaction, endpoint_key); // Add a peer which is not contactable store.peer_put (transaction, nano::endpoint_key{ boost::asio::ip::address_v6::any ().to_bytes (), 55555 }); } node2->start (); ASSERT_TIMELY (10s, !node2->network.empty () && !node1->network.empty ()) // Wait to finish TCP node ID handshakes ASSERT_TIMELY (10s, node1->bootstrap.realtime_count != 0 && node2->bootstrap.realtime_count != 0); // Confirm that the peers match with the endpoints we are expecting ASSERT_EQ (1, node1->network.size ()); auto list1 (node1->network.list (2)); ASSERT_EQ (node2->network.endpoint (), list1[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::tcp, list1[0]->get_type ()); ASSERT_EQ (1, node2->network.size ()); auto list2 (node2->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list2[0]->get_endpoint ()); ASSERT_EQ (nano::transport::transport_type::tcp, list2[0]->get_type ()); // Stop the peer node and check that it is removed from the store node1->stop (); ASSERT_TIMELY (10s, node2->network.size () != 1); ASSERT_TRUE (node2->network.empty ()); // Uncontactable peer should not be stored auto transaction (store.tx_begin_read ()); ASSERT_EQ (store.peer_count (transaction), 1); ASSERT_TRUE (store.peer_exists (transaction, endpoint_key)); node2->stop (); } TEST (node, peer_cache_restart) { nano::system system (1); auto node1 (system.nodes[0]); 
ASSERT_TRUE (node1->network.empty ()); auto endpoint = node1->network.endpoint (); nano::endpoint_key endpoint_key{ endpoint.address ().to_v6 ().to_bytes (), endpoint.port () }; auto path (nano::unique_path ()); { auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), path, system.logging, system.work)); system.nodes.push_back (node2); auto & store = node2->store; { // Add a peer to the database auto transaction (store.tx_begin_write ()); store.peer_put (transaction, endpoint_key); } node2->start (); ASSERT_TIMELY (10s, !node2->network.empty ()); // Confirm that the peers match with the endpoints we are expecting auto list (node2->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list[0]->get_endpoint ()); ASSERT_EQ (1, node2->network.size ()); node2->stop (); } // Restart node { nano::node_flags node_flags; node_flags.read_only = true; auto node3 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), path, system.logging, system.work, node_flags)); system.nodes.push_back (node3); // Check cached peers after restart node3->network.start (); node3->add_initial_peers (); auto & store = node3->store; { auto transaction (store.tx_begin_read ()); ASSERT_EQ (store.peer_count (transaction), 1); ASSERT_TRUE (store.peer_exists (transaction, endpoint_key)); } ASSERT_TIMELY (10s, !node3->network.empty ()); // Confirm that the peers match with the endpoints we are expecting auto list (node3->network.list (2)); ASSERT_EQ (node1->network.endpoint (), list[0]->get_endpoint ()); ASSERT_EQ (1, node3->network.size ()); node3->stop (); } } TEST (node, unchecked_cleanup) { nano::system system; nano::node_flags node_flags; node_flags.disable_unchecked_cleanup = true; nano::keypair key; auto & node (*system.add_node (node_flags)); auto open = nano::state_block_builder () .account (key.pub) .previous (0) .representative (key.pub) .balance (1) .link (key.pub) .sign (key.prv, key.pub) .work (*system.work.generate (key.pub)) .build_shared (); std::vector<uint8_t> bytes; { nano::vectorstream stream (bytes); open->serialize (stream); } // Add to the blocks filter // Should be cleared after unchecked cleanup ASSERT_FALSE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); node.process_active (open); node.block_processor.flush (); node.config.unchecked_cutoff_time = std::chrono::seconds (2); { auto transaction (node.store.tx_begin_read ()); auto unchecked_count (node.store.unchecked_count (transaction)); ASSERT_EQ (unchecked_count, 1); ASSERT_EQ (unchecked_count, node.store.unchecked_count (transaction)); } std::this_thread::sleep_for (std::chrono::seconds (1)); node.unchecked_cleanup (); ASSERT_TRUE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); { auto transaction (node.store.tx_begin_read ()); auto unchecked_count (node.store.unchecked_count (transaction)); ASSERT_EQ (unchecked_count, 1); ASSERT_EQ (unchecked_count, node.store.unchecked_count (transaction)); } std::this_thread::sleep_for (std::chrono::seconds (2)); node.unchecked_cleanup (); ASSERT_FALSE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); { auto transaction (node.store.tx_begin_read ()); auto unchecked_count (node.store.unchecked_count (transaction)); ASSERT_EQ (unchecked_count, 0); ASSERT_EQ (unchecked_count, node.store.unchecked_count (transaction)); } } /** This checks that a node can be opened (without being blocked) when a write lock is held elsewhere */ TEST (node, dont_write_lock_node) { auto path = nano::unique_path (); 
std::promise<void> write_lock_held_promise; std::promise<void> finished_promise; std::thread ([&path, &write_lock_held_promise, &finished_promise]() { nano::logger_mt logger; auto store = nano::make_store (logger, path, false, true); { nano::genesis genesis; nano::ledger_cache ledger_cache; auto transaction (store->tx_begin_write ()); store->initialize (transaction, genesis, ledger_cache); } // Hold write lock open until main thread is done needing it auto transaction (store->tx_begin_write ()); write_lock_held_promise.set_value (); finished_promise.get_future ().wait (); }) .detach (); write_lock_held_promise.get_future ().wait (); // Check inactive node can finish executing while a write lock is open nano::inactive_node node (path, nano::inactive_node_flag_defaults ()); finished_promise.set_value (); } TEST (node, bidirectional_tcp) { #ifdef _WIN32 if (nano::using_rocksdb_in_tests ()) { // Don't test this in rocksdb mode return; } #endif nano::system system; nano::node_flags node_flags; // Disable bootstrap to start elections for new blocks node_flags.disable_legacy_bootstrap = true; node_flags.disable_lazy_bootstrap = true; node_flags.disable_wallet_bootstrap = true; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node1 = system.add_node (node_config, node_flags); node_config.peering_port = nano::get_available_port (); node_config.tcp_incoming_connections_max = 0; // Disable incoming TCP connections for node 2 auto node2 = system.add_node (node_config, node_flags); // Check network connections ASSERT_EQ (1, node1->network.size ()); ASSERT_EQ (1, node2->network.size ()); auto list1 (node1->network.list (1)); ASSERT_EQ (nano::transport::transport_type::tcp, list1[0]->get_type ()); ASSERT_NE (node2->network.endpoint (), list1[0]->get_endpoint ()); // Ephemeral port ASSERT_EQ (node2->node_id.pub, list1[0]->get_node_id ()); auto list2 (node2->network.list (1)); ASSERT_EQ (nano::transport::transport_type::tcp, list2[0]->get_type ()); ASSERT_EQ (node1->network.endpoint (), list2[0]->get_endpoint ()); ASSERT_EQ (node1->node_id.pub, list2[0]->get_node_id ()); // Test block propagation from node 1 nano::genesis genesis; nano::keypair key; nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (genesis.hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1->work_generate_blocking (genesis.hash ())) .build_shared (); node1->process_active (send1); node1->block_processor.flush (); ASSERT_TIMELY (10s, node1->ledger.block_exists (send1->hash ()) && node2->ledger.block_exists (send1->hash ())); // Test block confirmation from node 1 (add representative to node 1) system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); // Wait to find new representative ASSERT_TIMELY (10s, node2->rep_crawler.representative_count () != 0); /* Wait for confirmation To check connection we need only node 2 confirmation status Node 1 election can be unconfirmed because representative private key was inserted after election start (and node 2 isn't flooding new votes to principal representatives) */ bool confirmed (false); system.deadline_set (10s); while (!confirmed) { auto transaction2 (node2->store.tx_begin_read ()); confirmed = node2->ledger.block_confirmed (transaction2, send1->hash ()); ASSERT_NO_ERROR
(system.poll ()); } // Test block propagation & confirmation from node 2 (remove representative from node 1) { auto transaction (system.wallet (0)->wallets.tx_begin_write ()); system.wallet (0)->store.erase (transaction, nano::dev_genesis_key.pub); } /* Test block propagation from node 2 Node 2 has only ephemeral TCP port open. Node 1 cannot establish connection to node 2 listening port */ auto send2 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (send1->hash ()) .representative (nano::dev_genesis_key.pub) .balance (nano::genesis_amount - 2 * nano::Gxrb_ratio) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1->work_generate_blocking (send1->hash ())) .build_shared (); node2->process_active (send2); node2->block_processor.flush (); ASSERT_TIMELY (10s, node1->ledger.block_exists (send2->hash ()) && node2->ledger.block_exists (send2->hash ())); // Test block confirmation from node 2 (add representative to node 2) system.wallet (1)->insert_adhoc (nano::dev_genesis_key.prv); // Wait to find changed representative ASSERT_TIMELY (10s, node1->rep_crawler.representative_count () != 0); /* Wait for confirmation To check connection we need only node 1 confirmation status Node 2 election can be unconfirmed because representative private key was inserted after election start (and node 1 isn't flooding new votes to principal representatives) */ confirmed = false; system.deadline_set (20s); while (!confirmed) { auto transaction1 (node1->store.tx_begin_read ()); confirmed = node1->ledger.block_confirmed (transaction1, send2->hash ()); ASSERT_NO_ERROR (system.poll ()); } } // Tests that local blocks are flooded to all principal representatives // Sanitizers or running within valgrind use different timings and number of nodes TEST (node, aggressive_flooding) { nano::system system; nano::node_flags node_flags; node_flags.disable_request_loop = true; node_flags.disable_block_processor_republishing = true; node_flags.disable_bootstrap_bulk_push_client = true; node_flags.disable_bootstrap_bulk_pull_server = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_lazy_bootstrap = true; node_flags.disable_legacy_bootstrap = true; node_flags.disable_wallet_bootstrap = true; auto & node1 (*system.add_node (node_flags)); auto & wallet1 (*system.wallet (0)); wallet1.insert_adhoc (nano::dev_genesis_key.prv); std::vector<std::pair<std::shared_ptr<nano::node>, std::shared_ptr<nano::wallet>>> nodes_wallets; bool const sanitizer_or_valgrind (is_sanitizer_build || nano::running_within_valgrind ()); nodes_wallets.resize (!sanitizer_or_valgrind ?
5 : 3); std::generate (nodes_wallets.begin (), nodes_wallets.end (), [&system, node_flags]() { nano::node_config node_config (nano::get_available_port (), system.logging); auto node (system.add_node (node_config, node_flags)); return std::make_pair (node, system.wallet (system.nodes.size () - 1)); }); // This test is only valid if a non-aggressive flood would not reach every peer ASSERT_TIMELY (5s, node1.network.size () == nodes_wallets.size ()); ASSERT_LT (node1.network.fanout (), nodes_wallets.size ()); // Send a large amount to create a principal representative in each node auto large_amount = (nano::genesis_amount / 2) / nodes_wallets.size (); std::vector<std::shared_ptr<nano::block>> genesis_blocks; for (auto & node_wallet : nodes_wallets) { nano::keypair keypair; node_wallet.second->store.representative_set (node_wallet.first->wallets.tx_begin_write (), keypair.pub); node_wallet.second->insert_adhoc (keypair.prv); auto block (wallet1.send_action (nano::dev_genesis_key.pub, keypair.pub, large_amount)); genesis_blocks.push_back (block); } // Ensure all nodes have the full genesis chain for (auto & node_wallet : nodes_wallets) { for (auto const & block : genesis_blocks) { node_wallet.first->process (*block); } ASSERT_EQ (node1.latest (nano::dev_genesis_key.pub), node_wallet.first->latest (nano::dev_genesis_key.pub)); } // Wait until the main node sees all representatives ASSERT_TIMELY (!sanitizer_or_valgrind ? 10s : 40s, node1.rep_crawler.principal_representatives ().size () == nodes_wallets.size ()); // Generate blocks and ensure they are sent to all representatives nano::state_block_builder builder; std::shared_ptr<nano::state_block> block{}; { auto transaction (node1.store.tx_begin_read ()); block = builder.make_block () .account (nano::dev_genesis_key.pub) .representative (nano::dev_genesis_key.pub) .previous (node1.ledger.latest (transaction, nano::dev_genesis_key.pub)) .balance (node1.ledger.account_balance (transaction, nano::dev_genesis_key.pub) - 1) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*node1.work_generate_blocking (node1.ledger.latest (transaction, nano::dev_genesis_key.pub))) .build (); } // Processing locally goes through the aggressive block flooding path node1.process_local (block, false); auto all_have_block = [&nodes_wallets](nano::block_hash const & hash_a) { return std::all_of (nodes_wallets.begin (), nodes_wallets.end (), [hash = hash_a](auto const & node_wallet) { return node_wallet.first->block (hash) != nullptr; }); }; ASSERT_TIMELY (!sanitizer_or_valgrind ? 5s : 25s, all_have_block (block->hash ())); // Do the same for a wallet block auto wallet_block = wallet1.send_sync (nano::dev_genesis_key.pub, nano::dev_genesis_key.pub, 10); ASSERT_TIMELY (!sanitizer_or_valgrind ? 
5s : 25s, all_have_block (wallet_block)); // All blocks: genesis + (send+open) for each representative + 2 local blocks // The main node only sees all blocks if other nodes are flooding their PR's open block to all other PRs ASSERT_EQ (1 + 2 * nodes_wallets.size () + 2, node1.ledger.cache.block_count); } TEST (active_difficulty, recalculate_work) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; auto & node1 = *system.add_node (node_config); nano::genesis genesis; nano::keypair key1; ASSERT_EQ (node1.network_params.network.publish_thresholds.epoch_2, node1.active.active_difficulty ()); auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key1.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto multiplier1 = nano::difficulty::to_multiplier (send1->difficulty (), node1.network_params.network.publish_thresholds.epoch_2); // Process as local block node1.process_active (send1); ASSERT_TIMELY (2s, !node1.active.empty ()); auto sum (std::accumulate (node1.active.multipliers_cb.begin (), node1.active.multipliers_cb.end (), double(0))); ASSERT_EQ (node1.active.active_difficulty (), nano::difficulty::from_multiplier (sum / node1.active.multipliers_cb.size (), node1.network_params.network.publish_thresholds.epoch_2)); nano::unique_lock<nano::mutex> lock (node1.active.mutex); // Fake history records to force work recalculation for (auto i (0); i < node1.active.multipliers_cb.size (); i++) { node1.active.multipliers_cb.push_back (multiplier1 * (1 + i / 100.)); } node1.work_generate_blocking (*send1); node1.process_active (send1); node1.active.update_active_multiplier (lock); sum = std::accumulate (node1.active.multipliers_cb.begin (), node1.active.multipliers_cb.end (), double(0)); ASSERT_EQ (node1.active.trended_active_multiplier.load (), sum / node1.active.multipliers_cb.size ()); lock.unlock (); } TEST (node, node_sequence) { nano::system system (3); ASSERT_EQ (0, system.nodes[0]->node_seq); ASSERT_EQ (0, system.nodes[0]->node_seq); ASSERT_EQ (1, system.nodes[1]->node_seq); ASSERT_EQ (2, system.nodes[2]->node_seq); } TEST (node, rollback_vote_self) { nano::system system; nano::node_flags flags; flags.disable_request_loop = true; auto & node = *system.add_node (flags); nano::state_block_builder builder; nano::keypair key; auto weight = node.online_reps.delta (); auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (nano::genesis_hash) .representative (nano::dev_genesis_key.pub) .link (key.pub) .balance (nano::genesis_amount - weight) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build_shared (); auto open = builder.make_block () .account (key.pub) .previous (0) .representative (key.pub) .link (send1->hash ()) .balance (weight) .sign (key.prv, key.pub) .work (*system.work.generate (key.pub)) .build_shared (); auto send2 = builder.make_block () .from (*send1) .previous (send1->hash ()) .balance (send1->balance ().number () - 1) .link (nano::dev_genesis_key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); auto fork = builder.make_block () .from (*send2) .balance (send2->balance ().number () - 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .build_shared (); ASSERT_EQ (nano::process_result::progress, 
node.process (*send1).code); ASSERT_EQ (nano::process_result::progress, node.process (*open).code); // Confirm blocks to allow voting node.block_confirm (open); auto election = node.active.election (open->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); ASSERT_TIMELY (5s, node.ledger.cache.cemented_count == 3); ASSERT_EQ (weight, node.weight (key.pub)); node.process_active (send2); node.process_active (fork); node.block_processor.flush (); election = node.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election); ASSERT_EQ (2, election->blocks ().size ()); // Insert genesis key in the wallet system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); { // The write guard prevents the block processor from performing the rollback auto write_guard = node.write_database_queue.wait (nano::writer::testing); { ASSERT_EQ (1, election->votes ().size ()); // Vote with key to switch the winner election->vote (key.pub, 0, fork->hash ()); ASSERT_EQ (2, election->votes ().size ()); // The winner changed ASSERT_EQ (election->winner (), fork); } // Even without the rollback being finished, the aggregator must reply with a vote for the new winner, not the old one ASSERT_TRUE (node.history.votes (send2->root (), send2->hash ()).empty ()); ASSERT_TRUE (node.history.votes (fork->root (), fork->hash ()).empty ()); auto & node2 = *system.add_node (); auto channel (node.network.udp_channels.create (node2.network.endpoint ())); node.aggregator.add (channel, { { send2->hash (), send2->root () } }); ASSERT_TIMELY (5s, !node.history.votes (fork->root (), fork->hash ()).empty ()); ASSERT_TRUE (node.history.votes (send2->root (), send2->hash ()).empty ()); // Going out of the scope allows the rollback to complete } // A vote is eventually generated from the local representative ASSERT_TIMELY (5s, 3 == election->votes ().size ()); auto votes (election->votes ()); auto vote (votes.find (nano::dev_genesis_key.pub)); ASSERT_NE (votes.end (), vote); ASSERT_EQ (fork->hash (), vote->second.hash); } // Confirm a complex dependency graph starting from the first block TEST (node, dependency_graph) { nano::system system; nano::node_config config (nano::get_available_port (), system.logging); config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node = *system.add_node (config); nano::state_block_builder builder; nano::keypair key1, key2, key3; // Send to key1 auto gen_send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (nano::genesis_hash) .representative (nano::dev_genesis_key.pub) .link (key1.pub) .balance (nano::genesis_amount - 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build_shared (); // Receive from genesis auto key1_open = builder.make_block () .account (key1.pub) .previous (0) .representative (key1.pub) .link (gen_send1->hash ()) .balance (1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build (); // Send to genesis auto key1_send1 = builder.make_block () .account (key1.pub) .previous (key1_open->hash ()) .representative (key1.pub) .link (nano::dev_genesis_key.pub) .balance (0) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_open->hash ())) .build (); // Receive from key1 auto gen_receive = builder.make_block () .from (*gen_send1) .previous (gen_send1->hash ()) .link (key1_send1->hash ()) .balance (nano::genesis_amount) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate 
(gen_send1->hash ())) .build (); // Send to key2 auto gen_send2 = builder.make_block () .from (*gen_receive) .previous (gen_receive->hash ()) .link (key2.pub) .balance (gen_receive->balance ().number () - 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (gen_receive->hash ())) .build (); // Receive from genesis auto key2_open = builder.make_block () .account (key2.pub) .previous (0) .representative (key2.pub) .link (gen_send2->hash ()) .balance (2) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2.pub)) .build (); // Send to key3 auto key2_send1 = builder.make_block () .account (key2.pub) .previous (key2_open->hash ()) .representative (key2.pub) .link (key3.pub) .balance (1) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2_open->hash ())) .build (); // Receive from key2 auto key3_open = builder.make_block () .account (key3.pub) .previous (0) .representative (key3.pub) .link (key2_send1->hash ()) .balance (1) .sign (key3.prv, key3.pub) .work (*system.work.generate (key3.pub)) .build (); // Send to key1 auto key2_send2 = builder.make_block () .from (*key2_send1) .previous (key2_send1->hash ()) .link (key1.pub) .balance (key2_send1->balance ().number () - 1) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2_send1->hash ())) .build (); // Receive from key2 auto key1_receive = builder.make_block () .from (*key1_send1) .previous (key1_send1->hash ()) .link (key2_send2->hash ()) .balance (key1_send1->balance ().number () + 1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_send1->hash ())) .build (); // Send to key3 auto key1_send2 = builder.make_block () .from (*key1_receive) .previous (key1_receive->hash ()) .link (key3.pub) .balance (key1_receive->balance ().number () - 1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_receive->hash ())) .build (); // Receive from key1 auto key3_receive = builder.make_block () .from (*key3_open) .previous (key3_open->hash ()) .link (key1_send2->hash ()) .balance (key3_open->balance ().number () + 1) .sign (key3.prv, key3.pub) .work (*system.work.generate (key3_open->hash ())) .build (); // Upgrade key3 auto key3_epoch = builder.make_block () .from (*key3_receive) .previous (key3_receive->hash ()) .link (node.ledger.epoch_link (nano::epoch::epoch_1)) .balance (key3_receive->balance ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (key3_receive->hash ())) .build (); ASSERT_EQ (nano::process_result::progress, node.process (*gen_send1).code); ASSERT_EQ (nano::process_result::progress, node.process (*key1_open).code); ASSERT_EQ (nano::process_result::progress, node.process (*key1_send1).code); ASSERT_EQ (nano::process_result::progress, node.process (*gen_receive).code); ASSERT_EQ (nano::process_result::progress, node.process (*gen_send2).code); ASSERT_EQ (nano::process_result::progress, node.process (*key2_open).code); ASSERT_EQ (nano::process_result::progress, node.process (*key2_send1).code); ASSERT_EQ (nano::process_result::progress, node.process (*key3_open).code); ASSERT_EQ (nano::process_result::progress, node.process (*key2_send2).code); ASSERT_EQ (nano::process_result::progress, node.process (*key1_receive).code); ASSERT_EQ (nano::process_result::progress, node.process (*key1_send2).code); ASSERT_EQ (nano::process_result::progress, node.process (*key3_receive).code); ASSERT_EQ (nano::process_result::progress, node.process (*key3_epoch).code); ASSERT_TRUE (node.active.empty ()); // Hash -> Ancestors 
std::unordered_map<nano::block_hash, std::vector<nano::block_hash>> dependency_graph{ { key1_open->hash (), { gen_send1->hash () } }, { key1_send1->hash (), { key1_open->hash () } }, { gen_receive->hash (), { gen_send1->hash (), key1_open->hash () } }, { gen_send2->hash (), { gen_receive->hash () } }, { key2_open->hash (), { gen_send2->hash () } }, { key2_send1->hash (), { key2_open->hash () } }, { key3_open->hash (), { key2_send1->hash () } }, { key2_send2->hash (), { key2_send1->hash () } }, { key1_receive->hash (), { key1_send1->hash (), key2_send2->hash () } }, { key1_send2->hash (), { key1_send1->hash () } }, { key3_receive->hash (), { key3_open->hash (), key1_send2->hash () } }, { key3_epoch->hash (), { key3_receive->hash () } }, }; ASSERT_EQ (node.ledger.cache.block_count - 2, dependency_graph.size ()); // Start an election for the first block of the dependency graph, and ensure all blocks are eventually confirmed system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); node.block_confirm (gen_send1); ASSERT_NO_ERROR (system.poll_until_true (15s, [&] { // Not many blocks should be active simultaneously EXPECT_LT (node.active.size (), 6); nano::lock_guard<nano::mutex> guard (node.active.mutex); // Ensure that active blocks have their ancestors confirmed auto error = std::any_of (dependency_graph.cbegin (), dependency_graph.cend (), [&](auto entry) { if (node.active.blocks.count (entry.first)) { for (auto ancestor : entry.second) { if (!node.block_confirmed (ancestor)) { return true; } } } return false; }); EXPECT_FALSE (error); return error || node.ledger.cache.cemented_count == node.ledger.cache.block_count; })); ASSERT_EQ (node.ledger.cache.cemented_count, node.ledger.cache.block_count); ASSERT_TIMELY (5s, node.active.empty ()); } // Confirm a complex dependency graph. Uses frontiers confirmation which will fail to // confirm a frontier optimistically then fallback to pessimistic confirmation. 
TEST (node, dependency_graph_frontier) { nano::system system; nano::node_config config (nano::get_available_port (), system.logging); config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto & node1 = *system.add_node (config); config.peering_port = nano::get_available_port (); config.frontiers_confirmation = nano::frontiers_confirmation_mode::always; auto & node2 = *system.add_node (config); nano::state_block_builder builder; nano::keypair key1, key2, key3; // Send to key1 auto gen_send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (nano::genesis_hash) .representative (nano::dev_genesis_key.pub) .link (key1.pub) .balance (nano::genesis_amount - 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build_shared (); // Receive from genesis auto key1_open = builder.make_block () .account (key1.pub) .previous (0) .representative (key1.pub) .link (gen_send1->hash ()) .balance (1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1.pub)) .build (); // Send to genesis auto key1_send1 = builder.make_block () .account (key1.pub) .previous (key1_open->hash ()) .representative (key1.pub) .link (nano::dev_genesis_key.pub) .balance (0) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_open->hash ())) .build (); // Receive from key1 auto gen_receive = builder.make_block () .from (*gen_send1) .previous (gen_send1->hash ()) .link (key1_send1->hash ()) .balance (nano::genesis_amount) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (gen_send1->hash ())) .build (); // Send to key2 auto gen_send2 = builder.make_block () .from (*gen_receive) .previous (gen_receive->hash ()) .link (key2.pub) .balance (gen_receive->balance ().number () - 2) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (gen_receive->hash ())) .build (); // Receive from genesis auto key2_open = builder.make_block () .account (key2.pub) .previous (0) .representative (key2.pub) .link (gen_send2->hash ()) .balance (2) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2.pub)) .build (); // Send to key3 auto key2_send1 = builder.make_block () .account (key2.pub) .previous (key2_open->hash ()) .representative (key2.pub) .link (key3.pub) .balance (1) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2_open->hash ())) .build (); // Receive from key2 auto key3_open = builder.make_block () .account (key3.pub) .previous (0) .representative (key3.pub) .link (key2_send1->hash ()) .balance (1) .sign (key3.prv, key3.pub) .work (*system.work.generate (key3.pub)) .build (); // Send to key1 auto key2_send2 = builder.make_block () .from (*key2_send1) .previous (key2_send1->hash ()) .link (key1.pub) .balance (key2_send1->balance ().number () - 1) .sign (key2.prv, key2.pub) .work (*system.work.generate (key2_send1->hash ())) .build (); // Receive from key2 auto key1_receive = builder.make_block () .from (*key1_send1) .previous (key1_send1->hash ()) .link (key2_send2->hash ()) .balance (key1_send1->balance ().number () + 1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_send1->hash ())) .build (); // Send to key3 auto key1_send2 = builder.make_block () .from (*key1_receive) .previous (key1_receive->hash ()) .link (key3.pub) .balance (key1_receive->balance ().number () - 1) .sign (key1.prv, key1.pub) .work (*system.work.generate (key1_receive->hash ())) .build (); // Receive from key1 auto key3_receive = 
builder.make_block () .from (*key3_open) .previous (key3_open->hash ()) .link (key1_send2->hash ()) .balance (key3_open->balance ().number () + 1) .sign (key3.prv, key3.pub) .work (*system.work.generate (key3_open->hash ())) .build (); // Upgrade key3 auto key3_epoch = builder.make_block () .from (*key3_receive) .previous (key3_receive->hash ()) .link (node1.ledger.epoch_link (nano::epoch::epoch_1)) .balance (key3_receive->balance ()) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (key3_receive->hash ())) .build (); for (auto const & node : system.nodes) { auto transaction (node->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *gen_send1).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key1_open).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key1_send1).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *gen_receive).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *gen_send2).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key2_open).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key2_send1).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key3_open).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key2_send2).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key1_receive).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key1_send2).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key3_receive).code); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *key3_epoch).code); } // node1 can vote, but only on the first block system.wallet (0)->insert_adhoc (nano::dev_genesis_key.prv); ASSERT_TIMELY (10s, node2.active.active (gen_send1->qualified_root ())); node1.block_confirm (gen_send1); ASSERT_TIMELY (15s, node1.ledger.cache.cemented_count == node1.ledger.cache.block_count); ASSERT_TIMELY (15s, node2.ledger.cache.cemented_count == node2.ledger.cache.block_count); } namespace nano { TEST (node, deferred_dependent_elections) { nano::system system; nano::node_flags flags; flags.disable_request_loop = true; auto & node = *system.add_node (flags); auto & node2 = *system.add_node (flags); // node2 will be used to ensure all blocks are being propagated nano::state_block_builder builder; nano::keypair key; auto send1 = builder.make_block () .account (nano::dev_genesis_key.pub) .previous (nano::genesis_hash) .representative (nano::dev_genesis_key.pub) .link (key.pub) .balance (nano::genesis_amount - 1) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (nano::genesis_hash)) .build_shared (); auto open = builder.make_block () .account (key.pub) .previous (0) .representative (key.pub) .link (send1->hash ()) .balance (1) .sign (key.prv, key.pub) .work (*system.work.generate (key.pub)) .build_shared (); auto send2 = builder.make_block () .from (*send1) .previous (send1->hash ()) .balance (send1->balance ().number () - 1) .link (key.pub) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); auto receive = builder.make_block () .from (*open) .previous (open->hash ()) .link 
(send2->hash ()) .balance (2) .sign (key.prv, key.pub) .work (*system.work.generate (open->hash ())) .build_shared (); auto fork = builder.make_block () .from (*receive) .representative (nano::dev_genesis_key.pub) // was key.pub .sign (key.prv, key.pub) .build_shared (); node.process_local (send1); node.block_processor.flush (); auto election_send1 = node.active.election (send1->qualified_root ()); ASSERT_NE (nullptr, election_send1); // Should process and republish but not start an election for any dependent blocks node.process_local (open, false); node.process_local (send2, false); node.block_processor.flush (); ASSERT_TRUE (node.block (open->hash ())); ASSERT_TRUE (node.block (send2->hash ())); ASSERT_FALSE (node.active.active (open->qualified_root ())); ASSERT_FALSE (node.active.active (send2->qualified_root ())); ASSERT_TIMELY (2s, node2.block (open->hash ())); ASSERT_TIMELY (2s, node2.block (send2->hash ())); // Re-processing older blocks with updated work also does not start an election node.work_generate_blocking (*open, open->difficulty () + 1); node.process_local (open, false); node.block_processor.flush (); ASSERT_FALSE (node.active.active (open->qualified_root ())); /// However, work is still updated ASSERT_TIMELY (3s, node.store.block_get (node.store.tx_begin_read (), open->hash ())->block_work () == open->block_work ()); // It is however possible to manually start an election from elsewhere node.block_confirm (open); ASSERT_TRUE (node.active.active (open->qualified_root ())); node.active.erase (*open); ASSERT_FALSE (node.active.active (open->qualified_root ())); /// The election was dropped but it's still not possible to restart it node.work_generate_blocking (*open, open->difficulty () + 1); ASSERT_FALSE (node.active.active (open->qualified_root ())); node.process_local (open, false); node.block_processor.flush (); ASSERT_FALSE (node.active.active (open->qualified_root ())); /// However, work is still updated ASSERT_TIMELY (3s, node.store.block_get (node.store.tx_begin_read (), open->hash ())->block_work () == open->block_work ()); // Frontier confirmation also starts elections ASSERT_NO_ERROR (system.poll_until_true (5s, [&node, &send2] { nano::unique_lock<nano::mutex> lock{ node.active.mutex }; node.active.frontiers_confirmation (lock); lock.unlock (); return node.active.election (send2->qualified_root ()) != nullptr; })); // Drop both elections node.active.erase (*open); ASSERT_FALSE (node.active.active (open->qualified_root ())); node.active.erase (*send2); ASSERT_FALSE (node.active.active (send2->qualified_root ())); // Confirming send1 will automatically start elections for the dependents election_send1->force_confirm (); ASSERT_TIMELY (2s, node.block_confirmed (send1->hash ())); ASSERT_TIMELY (2s, node.active.active (open->qualified_root ()) && node.active.active (send2->qualified_root ())); auto election_open = node.active.election (open->qualified_root ()); ASSERT_NE (nullptr, election_open); auto election_send2 = node.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election_send2); // Confirm one of the dependents of the receive but not the other, to ensure both have to be confirmed to start an election on processing ASSERT_EQ (nano::process_result::progress, node.process (*receive).code); ASSERT_FALSE (node.active.active (receive->qualified_root ())); election_open->force_confirm (); ASSERT_TIMELY (2s, node.block_confirmed (open->hash ())); ASSERT_FALSE (node.ledger.dependents_confirmed (node.store.tx_begin_read (), *receive));
std::this_thread::sleep_for (500ms); ASSERT_FALSE (node.active.active (receive->qualified_root ())); ASSERT_FALSE (node.ledger.rollback (node.store.tx_begin_write (), receive->hash ())); ASSERT_FALSE (node.block (receive->hash ())); node.process_local (receive, false); node.block_processor.flush (); ASSERT_TRUE (node.block (receive->hash ())); ASSERT_FALSE (node.active.active (receive->qualified_root ())); // Processing a fork will also not start an election ASSERT_EQ (nano::process_result::fork, node.process (*fork).code); node.process_local (fork, false); node.block_processor.flush (); ASSERT_FALSE (node.active.active (receive->qualified_root ())); // Confirming the other dependency allows starting an election from a fork election_send2->force_confirm (); ASSERT_TIMELY (2s, node.block_confirmed (send2->hash ())); ASSERT_TIMELY (2s, node.active.active (receive->qualified_root ())); node.active.erase (*receive); ASSERT_FALSE (node.active.active (receive->qualified_root ())); node.work_generate_blocking (*receive, receive->difficulty () + 1); node.process_local (receive, false); node.block_processor.flush (); ASSERT_TRUE (node.active.active (receive->qualified_root ())); } } TEST (rep_crawler, recently_confirmed) { nano::system system (1); auto & node1 (*system.nodes[0]); ASSERT_EQ (1, node1.ledger.cache.block_count); auto const block = nano::genesis ().open; node1.active.add_recently_confirmed (block->qualified_root (), block->hash ()); auto & node2 (*system.add_node ()); system.wallet (1)->insert_adhoc (nano::dev_genesis_key.prv); auto channel = node1.network.find_channel (node2.network.endpoint ()); ASSERT_NE (nullptr, channel); node1.rep_crawler.query (channel); ASSERT_TIMELY (3s, node1.rep_crawler.representative_count () == 1); } namespace nano { TEST (rep_crawler, local) { nano::system system; nano::node_flags flags; flags.disable_rep_crawler = true; auto & node = *system.add_node (flags); auto loopback = std::make_shared<nano::transport::channel_loopback> (node); auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, std::vector{ nano::genesis_hash }); { nano::lock_guard<nano::mutex> guard (node.rep_crawler.probable_reps_mutex); node.rep_crawler.active.insert (nano::genesis_hash); node.rep_crawler.responses.emplace_back (loopback, vote); } node.rep_crawler.validate (); ASSERT_EQ (0, node.rep_crawler.representative_count ()); } } TEST (node, pruning_automatic) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.max_pruning_age = std::chrono::seconds (1); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto & node1 = *system.add_node (node_config, node_flags); nano::genesis genesis; nano::keypair key1; auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = nano::send_block_builder () .previous (send1->hash ()) .destination (key1.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); // Process as local blocks node1.process_active (send1); node1.process_active (send2); node1.block_processor.flush (); // Confirm last block to prune previous { auto election = node1.active.election 
(send1->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.block_confirmed (send1->hash ()) && node1.active.active (send2->qualified_root ())); ASSERT_EQ (0, node1.ledger.cache.pruned_count); { auto election = node1.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.active.empty () && node1.block_confirmed (send2->hash ())); // Check pruning result ASSERT_TIMELY (3s, node1.ledger.cache.pruned_count == 1); ASSERT_TIMELY (2s, node1.store.pruned_count (node1.store.tx_begin_read ()) == 1); // Transaction commit ASSERT_EQ (1, node1.ledger.cache.pruned_count); ASSERT_EQ (3, node1.ledger.cache.block_count); ASSERT_TRUE (node1.ledger.block_exists (genesis.hash ())); ASSERT_FALSE (node1.ledger.block_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_or_pruned_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_exists (send2->hash ())); } TEST (node, pruning_age) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto & node1 = *system.add_node (node_config, node_flags); nano::genesis genesis; nano::keypair key1; auto send1 = nano::send_block_builder () .previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = nano::send_block_builder () .previous (send1->hash ()) .destination (key1.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); // Process as local blocks node1.process_active (send1); node1.process_active (send2); node1.block_processor.flush (); // Confirm last block to prune previous { auto election = node1.active.election (send1->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.block_confirmed (send1->hash ()) && node1.active.active (send2->qualified_root ())); ASSERT_EQ (0, node1.ledger.cache.pruned_count); { auto election = node1.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.active.empty () && node1.block_confirmed (send2->hash ())); // Pruning with default age 1 day node1.ledger_pruning (1, true, false); ASSERT_EQ (0, node1.ledger.cache.pruned_count); ASSERT_EQ (3, node1.ledger.cache.block_count); // Pruning with max age 0 node1.config.max_pruning_age = std::chrono::seconds (0); node1.ledger_pruning (1, true, false); ASSERT_EQ (1, node1.ledger.cache.pruned_count); ASSERT_EQ (3, node1.ledger.cache.block_count); ASSERT_TRUE (node1.ledger.block_exists (genesis.hash ())); ASSERT_FALSE (node1.ledger.block_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_or_pruned_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_exists (send2->hash ())); } TEST (node, pruning_depth) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto & node1 = *system.add_node (node_config, node_flags); nano::genesis genesis; nano::keypair key1; auto send1 = nano::send_block_builder () 
.previous (genesis.hash ()) .destination (key1.pub) .balance (nano::genesis_amount - nano::Gxrb_ratio) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (genesis.hash ())) .build_shared (); auto send2 = nano::send_block_builder () .previous (send1->hash ()) .destination (key1.pub) .balance (0) .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub) .work (*system.work.generate (send1->hash ())) .build_shared (); // Process as local blocks node1.process_active (send1); node1.process_active (send2); node1.block_processor.flush (); // Confirm last block to prune previous { auto election = node1.active.election (send1->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.block_confirmed (send1->hash ()) && node1.active.active (send2->qualified_root ())); ASSERT_EQ (0, node1.ledger.cache.pruned_count); { auto election = node1.active.election (send2->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node1.active.empty () && node1.block_confirmed (send2->hash ())); // Pruning with default depth (unlimited) node1.ledger_pruning (1, true, false); ASSERT_EQ (0, node1.ledger.cache.pruned_count); ASSERT_EQ (3, node1.ledger.cache.block_count); // Pruning with max depth 1 node1.config.max_pruning_depth = 1; node1.ledger_pruning (1, true, false); ASSERT_EQ (1, node1.ledger.cache.pruned_count); ASSERT_EQ (3, node1.ledger.cache.block_count); ASSERT_TRUE (node1.ledger.block_exists (genesis.hash ())); ASSERT_FALSE (node1.ledger.block_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_or_pruned_exists (send1->hash ())); ASSERT_TRUE (node1.ledger.block_exists (send2->hash ())); } namespace { void add_required_children_node_config_tree (nano::jsonconfig & tree) { nano::logging logging1; nano::jsonconfig logging_l; logging1.serialize_json (logging_l); tree.put_child ("logging", logging_l); nano::jsonconfig preconfigured_peers_l; tree.put_child ("preconfigured_peers", preconfigured_peers_l); nano::jsonconfig preconfigured_representatives_l; tree.put_child ("preconfigured_representatives", preconfigured_representatives_l); nano::jsonconfig work_peers_l; tree.put_child ("work_peers", work_peers_l); tree.put ("version", std::to_string (nano::node_config::json_version ())); } }
1
16666
`+` -> ` + ` I guess. Same below
nanocurrency-nano-node
cpp