patch (stringlengths 17-31.2k) | y (int64, 1-1) | oldf (stringlengths 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (stringlengths 8-843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---|
@@ -26,6 +26,11 @@ func (s *AuditLogsSourceStatus) GetCondition(t apis.ConditionType) *apis.Conditi
return auditLogsSourceCondSet.Manage(s).GetCondition(t)
}
+// GetTopLevelCondition returns the top level condition.
+func (s *AuditLogsSourceStatus) GetTopLevelCondition() *apis.Condition {
+ return auditLogsSourceCondSet.Manage(s).GetTopLevelCondition()
+}
+
// IsReady returns true if the resource is ready overall.
func (s *AuditLogsSourceStatus) IsReady() bool {
return auditLogsSourceCondSet.Manage(s).IsHappy() | 1 | /*
Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
duckv1alpha1 "github.com/google/knative-gcp/pkg/apis/duck/v1alpha1"
"knative.dev/pkg/apis"
)
// GetCondition returns the condition currently associated with the given type, or nil.
func (s *AuditLogsSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
return auditLogsSourceCondSet.Manage(s).GetCondition(t)
}
// IsReady returns true if the resource is ready overall.
func (s *AuditLogsSourceStatus) IsReady() bool {
return auditLogsSourceCondSet.Manage(s).IsHappy()
}
// InitializeConditions sets relevant unset conditions to Unknown state.
func (s *AuditLogsSourceStatus) InitializeConditions() {
auditLogsSourceCondSet.Manage(s).InitializeConditions()
}
// MarkPullSubscriptionNotReady sets the condition that the underlying PullSubscription
// source is not ready and why.
func (s *AuditLogsSourceStatus) MarkPullSubscriptionNotReady(reason, messageFormat string, messageA ...interface{}) {
auditLogsSourceCondSet.Manage(s).MarkFalse(duckv1alpha1.PullSubscriptionReady, reason, messageFormat, messageA...)
}
// MarkPullSubscriptionReady sets the condition that the underlying PubSub source is ready.
func (s *AuditLogsSourceStatus) MarkPullSubscriptionReady() {
auditLogsSourceCondSet.Manage(s).MarkTrue(duckv1alpha1.PullSubscriptionReady)
}
// MarkTopicNotReady sets the condition that the PubSub topic was not created and why.
func (s *AuditLogsSourceStatus) MarkTopicNotReady(reason, messageFormat string, messageA ...interface{}) {
auditLogsSourceCondSet.Manage(s).MarkFalse(duckv1alpha1.TopicReady, reason, messageFormat, messageA...)
}
// MarkTopicReady sets the condition that the underlying PubSub topic was created successfully.
func (s *AuditLogsSourceStatus) MarkTopicReady() {
auditLogsSourceCondSet.Manage(s).MarkTrue(duckv1alpha1.TopicReady)
}
// MarkSinkNotReady sets the condition that an AuditLogsSource pubsub sink
// has not been configured and why.
func (s *AuditLogsSourceStatus) MarkSinkNotReady(reason, messageFormat string, messageA ...interface{}) {
auditLogsSourceCondSet.Manage(s).MarkFalse(SinkReady, reason, messageFormat, messageA...)
}
func (s *AuditLogsSourceStatus) MarkSinkReady() {
auditLogsSourceCondSet.Manage(s).MarkTrue(SinkReady)
}
| 1 | 10,326 | sorry, I wasn't familiar with this TopLevelCondition... What would it be in this case? The AuditsLogReady condition? | google-knative-gcp | go
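The review question above concerns knative's condition-set API rather than anything specific to this file. A minimal, hypothetical Go sketch follows, assuming only what the patch itself shows: that knative.dev/pkg's ConditionManager exposes GetTopLevelCondition(). In knative's living condition sets the top-level ("happy") condition is the Ready condition, so for AuditLogsSourceStatus it would be the source's overall Ready condition, the same one IsReady()/IsHappy() inspects. The condition names below are illustrative, not the ones defined in knative-gcp.

```go
// Hypothetical illustration of GetTopLevelCondition on a stand-alone
// condition set; the dependent condition names are examples only.
package main

import (
	"fmt"

	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"
)

// A living condition set: "Ready" is the happy (top-level) condition and the
// listed types are its dependents.
var exampleCondSet = apis.NewLivingConditionSet(
	"PullSubscriptionReady",
	"TopicReady",
	"SinkReady",
)

func main() {
	var status duckv1.Status
	m := exampleCondSet.Manage(&status)
	m.InitializeConditions()

	top := m.GetTopLevelCondition()
	fmt.Println(top.Type)                    // Ready
	fmt.Println(m.IsHappy() == top.IsTrue()) // true: both views track the same condition
}
```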
@@ -109,6 +109,7 @@ type DiffTransformation struct {
d execute.Dataset
cache execute.TableBuilderCache
+ alloc *memory.Allocator
inputCache *execute.GroupLookup
} | 1 | package testing
import (
"bytes"
"errors"
"fmt"
"sort"
"sync"
"github.com/apache/arrow/go/arrow/array"
"github.com/influxdata/flux"
"github.com/influxdata/flux/arrow"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
)
const DiffKind = "diff"
type DiffOpSpec struct {
Verbose bool `json:"verbose,omitempty"`
}
func (s *DiffOpSpec) Kind() flux.OperationKind {
return DiffKind
}
func init() {
diffSignature := semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{
"verbose": semantic.Bool,
"got": flux.TableObjectType,
"want": flux.TableObjectType,
},
Required: semantic.LabelSet{"got", "want"},
Return: flux.TableObjectType,
PipeArgument: "got",
}
flux.RegisterPackageValue("testing", "diff", flux.FunctionValue(DiffKind, createDiffOpSpec, diffSignature))
flux.RegisterOpSpec(DiffKind, newDiffOp)
plan.RegisterProcedureSpec(DiffKind, newDiffProcedure, DiffKind)
execute.RegisterTransformation(DiffKind, createDiffTransformation)
}
func createDiffOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
t, err := args.GetRequiredObject("want")
if err != nil {
return nil, err
}
p, ok := t.(*flux.TableObject)
if !ok {
return nil, errors.New("want input to diff is not a table object")
}
a.AddParent(p)
t, err = args.GetRequiredObject("got")
if err != nil {
return nil, err
}
p, ok = t.(*flux.TableObject)
if !ok {
return nil, errors.New("got input to diff is not a table object")
}
a.AddParent(p)
verbose, ok, err := args.GetBool("verbose")
if err != nil {
return nil, err
} else if !ok {
verbose = false
}
return &DiffOpSpec{Verbose: verbose}, nil
}
func newDiffOp() flux.OperationSpec {
return new(DiffOpSpec)
}
type DiffProcedureSpec struct {
plan.DefaultCost
Verbose bool
}
func (s *DiffProcedureSpec) Kind() plan.ProcedureKind {
return DiffKind
}
func (s *DiffProcedureSpec) Copy() plan.ProcedureSpec {
ns := *s
return &ns
}
func newDiffProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
spec, ok := qs.(*DiffOpSpec)
if !ok {
return nil, fmt.Errorf("invalid spec type %T", qs)
}
return &DiffProcedureSpec{Verbose: spec.Verbose}, nil
}
type DiffTransformation struct {
mu sync.Mutex
wantID, gotID execute.DatasetID
finished map[execute.DatasetID]bool
d execute.Dataset
cache execute.TableBuilderCache
inputCache *execute.GroupLookup
}
type tableBuffer struct {
id execute.DatasetID
columns map[string]*tableColumn
sz int
}
func (tb *tableBuffer) Release() {
for _, col := range tb.columns {
col.Values.Release()
}
}
type tableColumn struct {
Type flux.ColType
Values array.Interface
}
func copyTable(id execute.DatasetID, tbl flux.Table) (*tableBuffer, error) {
// Find the value columns for the table and save them.
// We do not care about the group key.
type tableBuilderColumn struct {
Type flux.ColType
Builder array.Builder
}
builders := make(map[string]tableBuilderColumn)
for _, col := range tbl.Cols() {
if tbl.Key().HasCol(col.Label) {
continue
}
bc := tableBuilderColumn{Type: col.Type}
switch col.Type {
case flux.TFloat:
bc.Builder = arrow.NewFloatBuilder(nil)
case flux.TInt:
bc.Builder = arrow.NewIntBuilder(nil)
case flux.TUInt:
bc.Builder = arrow.NewUintBuilder(nil)
case flux.TString:
bc.Builder = arrow.NewStringBuilder(nil)
case flux.TBool:
bc.Builder = arrow.NewBoolBuilder(nil)
case flux.TTime:
bc.Builder = arrow.NewIntBuilder(nil)
default:
return nil, errors.New("implement me")
}
builders[col.Label] = bc
}
sz := 0
if err := tbl.Do(func(cr flux.ColReader) error {
sz += cr.Len()
for j, col := range cr.Cols() {
if tbl.Key().HasCol(col.Label) {
continue
}
switch col.Type {
case flux.TFloat:
b := builders[col.Label].Builder.(*array.Float64Builder)
b.Reserve(cr.Len())
vs := cr.Floats(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
case flux.TInt:
b := builders[col.Label].Builder.(*array.Int64Builder)
b.Reserve(cr.Len())
vs := cr.Ints(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
case flux.TUInt:
b := builders[col.Label].Builder.(*array.Uint64Builder)
b.Reserve(cr.Len())
vs := cr.UInts(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
case flux.TString:
b := builders[col.Label].Builder.(*array.BinaryBuilder)
b.Reserve(cr.Len())
vs := cr.Strings(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
case flux.TBool:
b := builders[col.Label].Builder.(*array.BooleanBuilder)
b.Reserve(cr.Len())
vs := cr.Bools(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
case flux.TTime:
b := builders[col.Label].Builder.(*array.Int64Builder)
b.Reserve(cr.Len())
vs := cr.Times(j)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
b.Append(vs.Value(i))
} else {
b.AppendNull()
}
}
default:
return errors.New("implement me")
}
}
return nil
}); err != nil {
return nil, err
}
// Construct each of the columns and then store the table buffer.
columns := make(map[string]*tableColumn, len(builders))
for label, bc := range builders {
columns[label] = &tableColumn{
Type: bc.Type,
Values: bc.Builder.NewArray(),
}
bc.Builder.Release()
}
return &tableBuffer{
id: id,
columns: columns,
sz: sz,
}, nil
}
func createDiffTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) {
if len(a.Parents()) != 2 {
return nil, nil, errors.New("diff should have exactly 2 parents")
}
cache := execute.NewTableBuilderCache(a.Allocator())
dataset := execute.NewDataset(id, mode, cache)
pspec, ok := spec.(*DiffProcedureSpec)
if !ok {
return nil, nil, fmt.Errorf("invalid spec type %T", pspec)
}
transform := NewDiffTransformation(dataset, cache, pspec, a.Parents()[0], a.Parents()[1], a.Allocator())
return transform, dataset, nil
}
func NewDiffTransformation(d execute.Dataset, cache execute.TableBuilderCache, spec *DiffProcedureSpec, wantID, gotID execute.DatasetID, a *memory.Allocator) *DiffTransformation {
return &DiffTransformation{
wantID: wantID,
gotID: gotID,
d: d,
cache: cache,
inputCache: execute.NewGroupLookup(),
finished: make(map[execute.DatasetID]bool, 2),
}
}
func (t *DiffTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error {
panic("implement me")
}
func (t *DiffTransformation) Process(id execute.DatasetID, tbl flux.Table) error {
t.mu.Lock()
defer t.mu.Unlock()
// If one of the tables finished with an error, it is possible
// to prematurely declare the other table as finished so we
// don't do more work on something that failed anyway.
if t.finished[id] {
return nil
}
// Copy the table we are processing into a buffer.
// This may or may not be the want table. We fix that later.
want, err := copyTable(id, tbl)
if err != nil {
return err
}
// Look in the input cache for a table buffer.
var got *tableBuffer
if obj, ok := t.inputCache.Delete(tbl.Key()); !ok {
// We did not find an entry. If the other table has
// not been finished, we need to store this table
// for later usage.
if len(t.finished) != 1 || !t.finished[id] {
t.inputCache.Set(tbl.Key(), want)
return nil
}
// The other table has been finished so we can construct
// this table immediately. Generate an empty table buffer.
got = &tableBuffer{}
} else {
// Otherwise, we assign the stored table buffer to got
// so we can generate the diff.
got = obj.(*tableBuffer)
}
// If the want table does not match the want id, we need to swap
// the tables. We use want here instead of got because got
// may be a pseudo-table we created above and we only need to
// test one of them.
if want.id != t.wantID {
got, want = want, got
}
return t.diff(tbl.Key(), want, got)
}
func (t *DiffTransformation) createSchema(builder execute.TableBuilder, want, got *tableBuffer) (diffIdx int, colMap map[string]int, err error) {
// Construct the table schema by adding columns for the table key
// (which, by definition, cannot be different at this point),
// a _diff column for the marker, and then the columns for each
// of the value types in alphabetical order.
if err := execute.AddTableKeyCols(builder.Key(), builder); err != nil {
return 0, nil, err
}
diffIdx, err = builder.AddCol(flux.ColMeta{
Label: "_diff",
Type: flux.TString,
})
if err != nil {
return 0, nil, err
}
// Determine all of the column names and their types.
colTypes := make(map[string]flux.ColType)
for label, col := range want.columns {
colTypes[label] = col.Type
}
for label, col := range got.columns {
if typ, ok := colTypes[label]; ok && typ != col.Type {
return 0, nil, fmt.Errorf("column types differ: want=%s got=%s", typ, col.Type)
} else if !ok {
colTypes[label] = col.Type
}
}
labels := make([]string, 0, len(colTypes))
for label := range colTypes {
labels = append(labels, label)
}
sort.Strings(labels)
// Now construct the schema and mark the column ids.
colMap = make(map[string]int)
for _, label := range labels {
idx, err := builder.AddCol(flux.ColMeta{
Label: label,
Type: colTypes[label],
})
if err != nil {
return 0, nil, err
}
colMap[label] = idx
}
return diffIdx, colMap, nil
}
func (t *DiffTransformation) diff(key flux.GroupKey, want, got *tableBuffer) error {
// Find the smallest size for the tables. We will only iterate
// over these rows.
sz := want.sz
if got.sz < sz {
sz = got.sz
}
// Look for the first row that is unequal. This is only needed
// if the sizes are the same.
i := 0
if want.sz == got.sz {
for ; i < sz; i++ {
if eq := t.rowEqual(want, got, i); !eq {
break
}
}
// The tables are equal.
if i == sz {
return nil
}
}
// This diff algorithm is not really a smart diff. We may want to
// fix that in the future and we reserve the right to do that, but
// this will just check the first row of one table with the first
// row of the other.
// First, construct an output table.
builder, created := t.cache.TableBuilder(key)
if !created {
return errors.New("duplicate table key")
}
diffIdx, columnIdxs, err := t.createSchema(builder, want, got)
if err != nil {
return err
}
for ; i < sz; i++ {
if eq := t.rowEqual(want, got, i); !eq {
if err := t.appendRow(builder, i, diffIdx, "-", want, columnIdxs); err != nil {
return err
}
if err := t.appendRow(builder, i, diffIdx, "+", got, columnIdxs); err != nil {
return err
}
}
}
// Append the remainder of the rows.
for i := sz; i < want.sz; i++ {
if err := t.appendRow(builder, i, diffIdx, "-", want, columnIdxs); err != nil {
return err
}
}
for i := sz; i < got.sz; i++ {
if err := t.appendRow(builder, i, diffIdx, "+", got, columnIdxs); err != nil {
return err
}
}
return nil
}
func (t *DiffTransformation) rowEqual(want, got *tableBuffer, i int) bool {
if len(want.columns) != len(got.columns) {
return false
}
for label, wantCol := range want.columns {
gotCol, ok := got.columns[label]
if !ok {
return false
}
if wantCol.Values.IsValid(i) != gotCol.Values.IsValid(i) {
return false
} else if wantCol.Values.IsNull(i) {
continue
}
switch wantCol.Type {
case flux.TFloat:
want, got := wantCol.Values.(*array.Float64), gotCol.Values.(*array.Float64)
if want.Value(i) != got.Value(i) {
return false
}
case flux.TInt:
want, got := wantCol.Values.(*array.Int64), gotCol.Values.(*array.Int64)
if want.Value(i) != got.Value(i) {
return false
}
case flux.TUInt:
want, got := wantCol.Values.(*array.Uint64), gotCol.Values.(*array.Uint64)
if want.Value(i) != got.Value(i) {
return false
}
case flux.TString:
want, got := wantCol.Values.(*array.Binary), gotCol.Values.(*array.Binary)
if !bytes.Equal(want.Value(i), got.Value(i)) {
return false
}
case flux.TBool:
want, got := wantCol.Values.(*array.Boolean), gotCol.Values.(*array.Boolean)
if want.Value(i) != got.Value(i) {
return false
}
case flux.TTime:
want, got := wantCol.Values.(*array.Int64), gotCol.Values.(*array.Int64)
if want.Value(i) != got.Value(i) {
return false
}
default:
return false
}
}
return true
}
func (t *DiffTransformation) appendRow(builder execute.TableBuilder, i, diffIdx int, diff string, tbl *tableBuffer, colMap map[string]int) error {
// Add the want column first.
if err := execute.AppendKeyValues(builder.Key(), builder); err != nil {
return err
}
// Add the diff column.
if err := builder.AppendString(diffIdx, diff); err != nil {
return err
}
// Add all of the values.
for label, j := range colMap {
col, ok := tbl.columns[label]
if !ok || col.Values.IsNull(i) {
if err := builder.AppendNil(j); err != nil {
return err
}
continue
}
switch col.Type {
case flux.TFloat:
vs := col.Values.(*array.Float64)
if err := builder.AppendFloat(j, vs.Value(i)); err != nil {
return err
}
case flux.TInt:
vs := col.Values.(*array.Int64)
if err := builder.AppendInt(j, vs.Value(i)); err != nil {
return err
}
case flux.TUInt:
vs := col.Values.(*array.Uint64)
if err := builder.AppendUInt(j, vs.Value(i)); err != nil {
return err
}
case flux.TString:
vs := col.Values.(*array.Binary)
if err := builder.AppendString(j, vs.ValueString(i)); err != nil {
return err
}
case flux.TBool:
vs := col.Values.(*array.Boolean)
if err := builder.AppendBool(j, vs.Value(i)); err != nil {
return err
}
case flux.TTime:
vs := col.Values.(*array.Int64)
if err := builder.AppendTime(j, execute.Time(vs.Value(i))); err != nil {
return err
}
}
}
return nil
}
func (t *DiffTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error {
t.mu.Lock()
defer t.mu.Unlock()
return t.d.UpdateWatermark(mark)
}
func (t *DiffTransformation) UpdateProcessingTime(id execute.DatasetID, mark execute.Time) error {
t.mu.Lock()
defer t.mu.Unlock()
return t.d.UpdateProcessingTime(mark)
}
func (t *DiffTransformation) Finish(id execute.DatasetID, err error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.finished[id] {
return
}
t.finished[id] = true
// An error occurred upstream which makes all of our work needless.
// Declare both of the ids as finished and flush the table builder.
if err != nil {
t.finished[t.wantID] = true
t.finished[t.gotID] = true
t.d.Finish(err)
return
} else if len(t.finished) < 2 {
// Both parents need to finish before we flush out the remainder.
return
}
// There will be no more tables so any tables we have should
// have a table created with a diff for every line since all
// of them are missing.
t.inputCache.Range(func(key flux.GroupKey, value interface{}) {
if err != nil {
return
}
var got, want *tableBuffer
if obj := value.(*tableBuffer); obj.id == t.wantID {
want, got = obj, &tableBuffer{}
} else {
want, got = &tableBuffer{}, obj
}
err = t.diff(key, want, got)
})
t.d.Finish(err)
}
| 1 | 9,923 | Where does the `alloc` field get set? | influxdata-flux | go |
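The review question above holds up against the code shown: the hunk adds the alloc field, but the constructor in this file never assigns it even though it already receives a *memory.Allocator. A hypothetical completion (same package as the file above, not part of the actual patch) would be to store that argument:

```go
// Hypothetical completion of the constructor shown in the file above: keep the
// allocator that NewDiffTransformation already receives so the new alloc field
// is actually populated.
func NewDiffTransformation(d execute.Dataset, cache execute.TableBuilderCache, spec *DiffProcedureSpec, wantID, gotID execute.DatasetID, a *memory.Allocator) *DiffTransformation {
	return &DiffTransformation{
		wantID:     wantID,
		gotID:      gotID,
		d:          d,
		cache:      cache,
		alloc:      a, // the assignment the review comment is asking about
		inputCache: execute.NewGroupLookup(),
		finished:   make(map[execute.DatasetID]bool, 2),
	}
}
```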
@@ -273,9 +273,10 @@ func setupPostgres(w *DWH) error {
return nil
}
-func runQueryPostgres(db *sql.DB, opts *queryOpts) (*sql.Rows, string, error) {
+func runQueryPostgres(db *sql.DB, opts *queryOpts) (*sql.Rows, int64, error) {
var (
query = fmt.Sprintf("SELECT * FROM %s %s", opts.table, opts.selectAs)
+ countQuery = fmt.Sprintf("SELECT count(*) FROM %s %s", opts.table, opts.selectAs)
conditions []string
values []interface{}
numFilters = 1 | 1 | package dwh
import (
"database/sql"
"fmt"
"strings"
"github.com/pkg/errors"
pb "github.com/sonm-io/core/proto"
)
var (
postgresSetupCommands = map[string]string{
"createTableDeals": `
CREATE TABLE IF NOT EXISTS Deals (
Id TEXT UNIQUE NOT NULL,
SupplierID TEXT NOT NULL,
ConsumerID TEXT NOT NULL,
MasterID TEXT NOT NULL,
AskID TEXT NOT NULL,
BidID TEXT NOT NULL,
Duration INTEGER NOT NULL,
Price TEXT NOT NULL,
StartTime INTEGER NOT NULL,
EndTime INTEGER NOT NULL,
Status INTEGER NOT NULL,
BlockedBalance TEXT NOT NULL,
TotalPayout TEXT NOT NULL,
LastBillTS INTEGER NOT NULL,
Netflags INTEGER NOT NULL,
AskIdentityLevel INTEGER NOT NULL,
BidIdentityLevel INTEGER NOT NULL,
SupplierCertificates BYTEA NOT NULL,
ConsumerCertificates BYTEA NOT NULL,
ActiveChangeRequest BOOLEAN NOT NULL,
CPUSysbenchMulti BIGINT NOT NULL,
CPUSysbenchOne BIGINT NOT NULL,
CPUCores BIGINT NOT NULL,
RAMSize BIGINT NOT NULL,
StorageSize BIGINT NOT NULL,
NetTrafficIn BIGINT NOT NULL,
NetTrafficOut BIGINT NOT NULL,
GPUCount BIGINT NOT NULL,
GPUMem BIGINT NOT NULL,
GPUEthHashrate BIGINT NOT NULL,
GPUCashHashrate BIGINT NOT NULL,
GPURedshift BIGINT NOT NULL
)`,
"createTableDealConditions": `
CREATE TABLE IF NOT EXISTS DealConditions (
Id BIGSERIAL PRIMARY KEY,
SupplierID TEXT NOT NULL,
ConsumerID TEXT NOT NULL,
MasterID TEXT NOT NULL,
Duration INTEGER NOT NULL,
Price TEXT NOT NULL,
StartTime INTEGER NOT NULL,
EndTime INTEGER NOT NULL,
TotalPayout TEXT NOT NULL,
DealID TEXT NOT NULL REFERENCES Deals(Id) ON DELETE CASCADE
)`,
"createTableDealPayments": `
CREATE TABLE IF NOT EXISTS DealPayments (
BillTS INTEGER NOT NULL,
PaidAmount TEXT NOT NULL,
DealID TEXT NOT NULL REFERENCES Deals(Id) ON DELETE CASCADE,
UNIQUE (BillTS, PaidAmount, DealID)
)`,
"createTableChangeRequests": `
CREATE TABLE IF NOT EXISTS DealChangeRequests (
Id TEXT UNIQUE NOT NULL,
CreatedTS INTEGER NOT NULL,
RequestType TEXT NOT NULL,
Duration INTEGER NOT NULL,
Price TEXT NOT NULL,
Status INTEGER NOT NULL,
DealID TEXT NOT NULL REFERENCES Deals(Id) ON DELETE CASCADE
)`,
"createTableOrders": `
CREATE TABLE IF NOT EXISTS Orders (
Id TEXT UNIQUE NOT NULL,
CreatedTS INTEGER NOT NULL,
DealID TEXT NOT NULL,
Type INTEGER NOT NULL,
Status INTEGER NOT NULL,
AuthorID TEXT NOT NULL,
CounterpartyID TEXT NOT NULL,
Duration BIGINT NOT NULL,
Price TEXT NOT NULL,
Netflags INTEGER NOT NULL,
IdentityLevel INTEGER NOT NULL,
Blacklist TEXT NOT NULL,
Tag BYTEA NOT NULL,
FrozenSum TEXT NOT NULL,
CreatorIdentityLevel INTEGER NOT NULL,
CreatorName TEXT NOT NULL,
CreatorCountry TEXT NOT NULL,
CreatorCertificates BYTEA NOT NULL,
CPUSysbenchMulti BIGINT NOT NULL,
CPUSysbenchOne BIGINT NOT NULL,
CPUCores BIGINT NOT NULL,
RAMSize BIGINT NOT NULL,
StorageSize BIGINT NOT NULL,
NetTrafficIn BIGINT NOT NULL,
NetTrafficOut BIGINT NOT NULL,
GPUCount BIGINT NOT NULL,
GPUMem BIGINT NOT NULL,
GPUEthHashrate BIGINT NOT NULL,
GPUCashHashrate BIGINT NOT NULL,
GPURedshift BIGINT NOT NULL
)`,
"createTableWorkers": `
CREATE TABLE IF NOT EXISTS Workers (
MasterID TEXT NOT NULL,
WorkerID TEXT NOT NULL,
Confirmed INTEGER NOT NULL,
UNIQUE (MasterID, WorkerID)
)`,
"createTableBlacklists": `
CREATE TABLE IF NOT EXISTS Blacklists (
AdderID TEXT NOT NULL,
AddeeID TEXT NOT NULL,
UNIQUE (AdderID, AddeeID)
)`,
"createTableValidators": `
CREATE TABLE IF NOT EXISTS Validators (
Id TEXT UNIQUE NOT NULL,
Level INTEGER NOT NULL
)`,
"createTableCertificates": `
CREATE TABLE IF NOT EXISTS Certificates (
OwnerID TEXT NOT NULL,
Attribute INTEGER NOT NULL,
AttributeLevel INTEGER NOT NULL,
Value BYTEA NOT NULL,
ValidatorID TEXT NOT NULL REFERENCES Validators(Id) ON DELETE CASCADE,
UNIQUE (OwnerID, ValidatorID, Attribute, Value)
)`,
"createTableProfiles": `
CREATE TABLE IF NOT EXISTS Profiles (
Id BIGSERIAL PRIMARY KEY,
UserID TEXT UNIQUE NOT NULL,
IdentityLevel INTEGER NOT NULL,
Name TEXT NOT NULL,
Country TEXT NOT NULL,
IsCorporation BOOLEAN NOT NULL,
IsProfessional BOOLEAN NOT NULL,
Certificates BYTEA NOT NULL,
ActiveAsks INTEGER NOT NULL,
ActiveBids INTEGER NOT NULL
)`,
"createTableMisc": `
CREATE TABLE IF NOT EXISTS Misc (
Id BIGSERIAL PRIMARY KEY,
LastKnownBlock INTEGER NOT NULL
)`,
}
postgresCreateIndex = "CREATE INDEX IF NOT EXISTS %s_%s ON %s (%s)"
postgresCommands = map[string]string{
"insertDeal": `INSERT INTO Deals VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32)`,
"updateDeal": `UPDATE Deals SET Duration = $1, Price = $2, StartTime = $3, EndTime = $4, Status = $5, BlockedBalance = $6, TotalPayout = $7, LastBillTS = $7 WHERE Id = $8`,
"updateDealsSupplier": `UPDATE Deals SET SupplierCertificates = $1 WHERE SupplierID = $2`,
"updateDealsConsumer": `UPDATE Deals SET ConsumerCertificates = $1 WHERE ConsumerID = $2`,
"selectDealByID": `SELECT * FROM Deals WHERE id = $1`,
"deleteDeal": `DELETE FROM Deals WHERE Id = $1`,
"insertOrder": `INSERT INTO Orders VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30)`,
"selectOrderByID": `SELECT * FROM Orders WHERE id = $1`,
"updateOrders": `UPDATE Orders SET CreatorIdentityLevel = $1, CreatorName = $2, CreatorCountry = $3, CreatorCertificates = $4 WHERE AuthorID = $5`,
"deleteOrder": `DELETE FROM Orders WHERE Id = $1`,
"insertDealChangeRequest": `INSERT INTO DealChangeRequests VALUES ($1, $2, $3, $4, $5, $6, $7)`,
"selectDealChangeRequests": `SELECT * FROM DealChangeRequests WHERE DealID = $1 AND RequestType = $2 AND Status = $3`,
"selectDealChangeRequestsByID": `SELECT * FROM DealChangeRequests WHERE DealID = $1`,
"deleteDealChangeRequest": `DELETE FROM DealChangeRequests WHERE Id = $1`,
"updateDealChangeRequest": `UPDATE DealChangeRequests SET Status = $1 WHERE Id = $2`,
"insertDealCondition": `INSERT INTO DealConditions(SupplierID, ConsumerID, MasterID, Duration, Price, StartTime, EndTime, TotalPayout, DealID) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`,
"updateDealConditionPayout": `UPDATE DealConditions SET TotalPayout = $1 WHERE Id = $2`,
"updateDealConditionEndTime": `UPDATE DealConditions SET EndTime = $1 WHERE Id = $2`,
"insertDealPayment": `INSERT INTO DealPayments VALUES ($1, $2, $3)`,
"insertWorker": `INSERT INTO Workers VALUES ($1, $2, $3)`,
"updateWorker": `UPDATE Workers SET Confirmed = $1 WHERE MasterID = $2 AND WorkerID = $3`,
"deleteWorker": `DELETE FROM Workers WHERE MasterID = $1 AND WorkerID = $2`,
"insertBlacklistEntry": `INSERT INTO Blacklists VALUES ($1, $2)`,
"selectBlacklists": `SELECT * FROM Blacklists WHERE AdderID = $1`,
"deleteBlacklistEntry": `DELETE FROM Blacklists WHERE AdderID = $1 AND AddeeID = $2`,
"insertValidator": `INSERT INTO Validators VALUES ($1, $2)`,
"updateValidator": `UPDATE Validators SET Level = $1 WHERE Id = $2`,
"insertCertificate": `INSERT INTO Certificates VALUES ($1, $2, $3, $4, $5)`,
"selectCertificates": `SELECT * FROM Certificates WHERE OwnerID = $1`,
"insertProfileUserID": `INSERT INTO Profiles (UserID, IdentityLevel, Name, Country, IsCorporation, IsProfessional, Certificates, ActiveAsks, ActiveBids ) VALUES ($1, 0, '', '', FALSE, FALSE, E'\\000', $2, $3)`,
"selectProfileByID": `SELECT * FROM Profiles WHERE UserID = $1`,
"profileNotInBlacklist": `AND UserID NOT IN (SELECT AddeeID FROM Blacklists WHERE AdderID = $ AND AddeeID = p.UserID)`,
"profileInBlacklist": `AND UserID IN (SELECT AddeeID FROM Blacklists WHERE AdderID = $ AND AddeeID = p.UserID)`,
"updateProfile": `UPDATE Profiles SET %s = $1 WHERE UserID = $2`,
"selectLastKnownBlock": `SELECT LastKnownBlock FROM Misc WHERE Id = 1`,
"insertLastKnownBlock": `INSERT INTO Misc(LastKnownBlock) VALUES ($1)`,
"updateLastKnownBlock": `UPDATE Misc SET LastKnownBlock = $1 WHERE Id = 1`,
}
)
func setupPostgres(w *DWH) error {
db, err := sql.Open(w.cfg.Storage.Backend, w.cfg.Storage.Endpoint)
if err != nil {
return err
}
defer func() {
if err != nil {
db.Close()
}
}()
for _, cmdName := range orderedSetupCommands {
_, err = db.Exec(postgresSetupCommands[cmdName])
if err != nil {
return errors.Wrapf(err, "failed to %s (%s)", cmdName, w.cfg.Storage.Backend)
}
}
for column := range DealsColumns {
if err = createIndex(db, postgresCreateIndex, "Deals", column); err != nil {
return err
}
}
for _, column := range []string{"Id", "DealID", "RequestType", "Status"} {
if err = createIndex(db, postgresCreateIndex, "DealChangeRequests", column); err != nil {
return err
}
}
for column := range DealConditionsColumns {
if err = createIndex(db, postgresCreateIndex, "DealConditions", column); err != nil {
return err
}
}
for column := range OrdersColumns {
if err = createIndex(db, postgresCreateIndex, "Orders", column); err != nil {
return err
}
}
for _, column := range []string{"MasterID", "WorkerID"} {
if err = createIndex(db, postgresCreateIndex, "Workers", column); err != nil {
return err
}
}
for _, column := range []string{"AdderID", "AddeeID"} {
if err = createIndex(db, postgresCreateIndex, "Blacklists", column); err != nil {
return err
}
}
if err = createIndex(db, postgresCreateIndex, "Validators", "Id"); err != nil {
return err
}
if err = createIndex(db, postgresCreateIndex, "Certificates", "OwnerID"); err != nil {
return err
}
for column := range ProfilesColumns {
if err = createIndex(db, postgresCreateIndex, "Profiles", column); err != nil {
return err
}
}
w.db = db
w.commands = postgresCommands
w.runQuery = runQueryPostgres
return nil
}
func runQueryPostgres(db *sql.DB, opts *queryOpts) (*sql.Rows, string, error) {
var (
query = fmt.Sprintf("SELECT * FROM %s %s", opts.table, opts.selectAs)
conditions []string
values []interface{}
numFilters = 1
)
for idx, filter := range opts.filters {
var condition string
if filter.OpenBracket {
condition += "("
}
condition += fmt.Sprintf("%s%s$%d", filter.Field, filter.CmpOperator, numFilters)
if filter.CloseBracket {
condition += ")"
}
if idx != len(opts.filters)-1 {
condition += fmt.Sprintf(" %s", filter.BoolOperator)
}
conditions = append(conditions, condition)
values = append(values, filter.Value)
numFilters++
}
if len(conditions) > 0 {
if opts.customFilter != nil {
clause := strings.Replace(opts.customFilter.clause, "$", fmt.Sprintf("$%d", numFilters), 1)
conditions = append(conditions, clause)
values = append(values, opts.customFilter.values...)
}
query += " WHERE " + strings.Join(conditions, " ")
}
if opts.limit > MaxLimit || opts.limit == 0 {
opts.limit = MaxLimit
}
if len(opts.sortings) > 0 {
query += fmt.Sprintf(" ORDER BY ")
var sortsFlat []string
for _, sort := range opts.sortings {
sortsFlat = append(sortsFlat, fmt.Sprintf("%s %s", sort.Field, pb.SortingOrder_name[int32(sort.Order)]))
}
query += strings.Join(sortsFlat, ", ")
}
query += fmt.Sprintf(" LIMIT %d", opts.limit)
if opts.offset > 0 {
query += fmt.Sprintf(" OFFSET %d", opts.offset)
}
query += ";"
rows, err := db.Query(query, values...)
if err != nil {
return nil, query, errors.Wrapf(err, "query `%s` failed", query)
}
return rows, query, nil
}
| 1 | 6,901 | looks expensive to do it on each query | sonm-io-core | go |
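For the review comment above: the hunk only builds countQuery; the code that executes it is not visible here. Assuming it is run with the same WHERE conditions and bind values as the main query, the added work per call would look roughly like the fragment below, a hypothetical sketch of the tail of runQueryPostgres rather than the real implementation. The second full SELECT count(*) round trip on every listing request is the cost the reviewer is flagging.

```go
// Hypothetical tail of runQueryPostgres: run the count with the same filters,
// then the paged query. The count is a second full query on every request,
// which is what "expensive to do it on each query" points at.
var count int64
if err := db.QueryRow(countQuery, values...).Scan(&count); err != nil {
	return nil, 0, errors.Wrapf(err, "count query `%s` failed", countQuery)
}

rows, err := db.Query(query, values...)
if err != nil {
	return nil, 0, errors.Wrapf(err, "query `%s` failed", query)
}

return rows, count, nil
```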
@@ -38,16 +38,16 @@ import model_params
_LOGGER = logging.getLogger(__name__)
_INPUT_DATA_FILE = resource_filename(
- "nupic.datafiles", "extra/nyctaxi/nyc_taxi.csv"
+ "nupic.datafiles", "extra/nyctaxi/nycTaxi.csv"
)
_OUTPUT_PATH = "anomaly_scores.csv"
_ANOMALY_THRESHOLD = 0.9
-# minimum metric value of nyc_taxi.csv
+# minimum metric value of nycTaxi.csv
_INPUT_MIN = 8
-# maximum metric value of nyc_taxi.csv
+# maximum metric value of nycTaxi.csv
_INPUT_MAX = 39197
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple client to create an HTM anomaly detection model for the nyctaxi dataset.
The script prints out all records that have an abnormally high anomaly
score.
"""
import csv
import datetime
import logging
from pkg_resources import resource_filename
from nupic.frameworks.opf.model_factory import ModelFactory
import model_params
_LOGGER = logging.getLogger(__name__)
_INPUT_DATA_FILE = resource_filename(
"nupic.datafiles", "extra/nyctaxi/nyc_taxi.csv"
)
_OUTPUT_PATH = "anomaly_scores.csv"
_ANOMALY_THRESHOLD = 0.9
# minimum metric value of nyc_taxi.csv
_INPUT_MIN = 8
# maximum metric value of nyc_taxi.csv
_INPUT_MAX = 39197
def _setRandomEncoderResolution(minResolution=0.001):
"""
Given model params, figure out the correct resolution for the
RandomDistributed encoder. Modifies params in place.
"""
encoder = (
model_params.MODEL_PARAMS["modelParams"]["sensorParams"]["encoders"]["value"]
)
if encoder["type"] == "RandomDistributedScalarEncoder":
rangePadding = abs(_INPUT_MAX - _INPUT_MIN) * 0.2
minValue = _INPUT_MIN - rangePadding
maxValue = _INPUT_MAX + rangePadding
resolution = max(minResolution,
(maxValue - minValue) / encoder.pop("numBuckets")
)
encoder["resolution"] = resolution
def createModel():
_setRandomEncoderResolution()
return ModelFactory.create(model_params.MODEL_PARAMS)
def runNYCTaxiAnomaly():
model = createModel()
model.enableInference({'predictedField': 'value'})
with open (_INPUT_DATA_FILE) as fin:
reader = csv.reader(fin)
csvWriter = csv.writer(open(_OUTPUT_PATH,"wb"))
csvWriter.writerow(["timestamp", "value", "anomaly_score"])
headers = reader.next()
for i, record in enumerate(reader, start=1):
modelInput = dict(zip(headers, record))
modelInput["value"] = float(modelInput["value"])
modelInput["timestamp"] = datetime.datetime.strptime(
modelInput["timestamp"], "%Y-%m-%d %H:%M:%S")
result = model.run(modelInput)
anomalyScore = result.inferences['anomalyScore']
csvWriter.writerow([modelInput["timestamp"], modelInput["value"],
"%.3f" % anomalyScore])
if anomalyScore > _ANOMALY_THRESHOLD:
_LOGGER.info("Anomaly detected at [%s]. Anomaly score: %f.",
result.rawInput["timestamp"], anomalyScore)
print "Anomaly scores have been written to",_OUTPUT_PATH
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
runNYCTaxiAnomaly()
| 1 | 22,208 | @rhyolight -- but weren't we supposed to be standardizing on using underscores in file names (versus camelcase)? | numenta-nupic | py |
@@ -209,9 +209,9 @@ module Mongoid
# @since 2.1.0
def empty?
if _loaded?
- in_memory.count == 0
+ in_memory.length == 0
else
- _unloaded.count + _added.count == 0
+ _added.length == 0 && !_unloaded.exists?
end
end
| 1 | # frozen_string_literal: true
# encoding: utf-8
module Mongoid
module Association
module Referenced
class HasMany
# This class is the wrapper for all referenced associations that have a
# target that can be a criteria or array of _loaded documents. This
# handles both cases or a combination of the two.
class Enumerable
extend Forwardable
include ::Enumerable
# The three main instance variables are collections of documents.
#
# @attribute [rw] _added Documents that have been appended.
# @attribute [rw] _loaded Persisted documents that have been _loaded.
# @attribute [rw] _unloaded A criteria representing persisted docs.
attr_accessor :_added, :_loaded, :_unloaded
def_delegators [], :is_a?, :kind_of?
# Check if the enumerable is equal to the other object.
#
# @example Check equality.
# enumerable == []
#
# @param [ Enumerable ] other The other enumerable.
#
# @return [ true, false ] If the objects are equal.
#
# @since 2.1.0
def ==(other)
return false unless other.respond_to?(:entries)
entries == other.entries
end
# Check equality of the enumerable against the provided object for case
# statements.
#
# @example Check case equality.
# enumerable === Array
#
# @param [ Object ] other The object to check.
#
# @return [ true, false ] If the objects are equal in a case.
#
# @since 3.1.4
def ===(other)
other.class == Class ? (Array == other || Enumerable == other) : self == other
end
# Append a document to the enumerable.
#
# @example Append the document.
# enumerable << document
#
# @param [ Document ] document The document to append.
#
# @return [ Document ] The document.
#
# @since 2.1.0
def <<(document)
_added[document._id] = document
self
end
alias :push :<<
# Clears out all the documents in this enumerable. If passed a block it
# will yield to each document that is in memory.
#
# @example Clear out the enumerable.
# enumerable.clear
#
# @example Clear out the enumerable with a block.
# enumerable.clear do |doc|
# doc.unbind
# end
#
# @return [ Array<Document> ] The cleared out _added docs.
#
# @since 2.1.0
def clear
if block_given?
in_memory { |doc| yield(doc) }
end
_loaded.clear and _added.clear
end
# Clones each document in the enumerable.
#
# @note This loads all documents into memory.
#
# @example Clone the enumerable.
# enumerable.clone
#
# @return [ Array<Document> ] An array clone of the enumerable.
#
# @since 2.1.6
def clone
collect { |doc| doc.clone }
end
# Delete the supplied document from the enumerable.
#
# @example Delete the document.
# enumerable.delete(document)
#
# @param [ Document ] document The document to delete.
#
# @return [ Document ] The deleted document.
#
# @since 2.1.0
def delete(document)
doc = (_loaded.delete(document._id) || _added.delete(document._id))
unless doc
if _unloaded && _unloaded.where(_id: document._id).exists?
yield(document) if block_given?
return document
end
end
yield(doc) if block_given?
doc
end
# Deletes every document in the enumerable for where the block returns
# true.
#
# @note This operation loads all documents from the database.
#
# @example Delete all matching documents.
# enumerable.delete_if do |doc|
# doc._id == _id
# end
#
# @return [ Array<Document> ] The remaining docs.
#
# @since 2.1.0
def delete_if(&block)
load_all!
deleted = in_memory.select(&block)
deleted.each do |doc|
_loaded.delete(doc._id)
_added.delete(doc._id)
end
self
end
# Iterating over this enumerable has to handle a few different
# scenarios.
#
# If the enumerable has its criteria _loaded into memory then it yields
# to all the _loaded docs and all the _added docs.
#
# If the enumerable has not _loaded the criteria then it iterates over
# the cursor while loading the documents and then iterates over the
# _added docs.
#
# If no block is passed then it returns an enumerator containing all
# docs.
#
# @example Iterate over the enumerable.
# enumerable.each do |doc|
# puts doc
# end
#
# @example return an enumerator containing all the docs
#
# a = enumerable.each
#
# @return [ true ] That the enumerable is now _loaded.
#
# @since 2.1.0
def each
unless block_given?
return to_enum
end
if _loaded?
_loaded.each_pair do |id, doc|
document = _added.delete(doc._id) || doc
set_base(document)
yield(document)
end
else
unloaded_documents.each do |doc|
document = _added.delete(doc._id) || _loaded.delete(doc._id) || doc
_loaded[document._id] = document
set_base(document)
yield(document)
end
end
_added.each_pair do |id, doc|
yield(doc)
end
@executed = true
end
# Is the enumerable empty? Will determine if the count is zero based on
# whether or not it is _loaded.
#
# @example Is the enumerable empty?
# enumerable.empty?
#
# @return [ true, false ] If the enumerable is empty.
#
# @since 2.1.0
def empty?
if _loaded?
in_memory.count == 0
else
_unloaded.count + _added.count == 0
end
end
# Returns whether the association has any documents, optionally
# subject to the provided filters.
#
# This method returns true if the association has any persisted
# documents and if it has any not yet persisted documents.
#
# If the association is already loaded, this method inspects the
# loaded documents and does not query the database. If the
# association is not loaded, the argument-less and block-less
# version does not load the association; the other versions
# (that delegate to Enumerable) may or may not load the association
# completely depending on whether it is iterated to completion.
#
# This method can take a parameter and a block. The behavior with
# either the parameter or the block is delegated to the standard
# library Enumerable module.
#
# Note that when Enumerable's any? method is invoked with both
# a block and a pattern, it only uses the pattern.
#
# @param [ Object ] condition The condition that documents
# must satisfy. See Enumerable documentation for details.
#
# @return [ true, false ] If the association has any documents.
def any?(*args)
return super if args.any? || block_given?
if _loaded?
in_memory.length > 0
else
_unloaded.exists? || _added.length > 0
end
end
# Get the first document in the enumerable. Will check the persisted
# documents first. Does not load the entire enumerable.
#
# @example Get the first document.
# enumerable.first
#
# @note Automatically adding a sort on _id when no other sort is
# defined on the criteria has the potential to cause bad performance issues.
# If you experience unexpected poor performance when using #first or #last,
# use the option { id_sort: :none }.
# Be aware that #first/#last won't guarantee order in this case.
#
# @param [ Hash ] opts The options for the query returning the first document.
#
# @option opts [ :none ] :id_sort Don't apply a sort on _id.
#
# @return [ Document ] The first document found.
#
# @since 2.1.0
def first(opts = {})
_loaded.try(:values).try(:first) ||
_added[(ul = _unloaded.try(:first, opts)).try(:_id)] ||
ul ||
_added.values.try(:first)
end
# Initialize the new enumerable either with a criteria or an array.
#
# @example Initialize the enumerable with a criteria.
# Enumberable.new(Post.where(:person_id => id))
#
# @example Initialize the enumerable with an array.
# Enumerable.new([ post ])
#
# @param [ Criteria, Array<Document> ] target The wrapped object.
#
# @since 2.1.0
def initialize(target, base = nil, association = nil)
@_base = base
@_association = association
if target.is_a?(Criteria)
@_added, @executed, @_loaded, @_unloaded = {}, false, {}, target
else
@_added, @executed = {}, true
@_loaded = target.inject({}) do |_target, doc|
_target[doc._id] = doc if doc
_target
end
end
end
# Does the target include the provided document?
#
# @example Does the target include the document?
# enumerable.include?(document)
#
# @param [ Document ] doc The document to check.
#
# @return [ true, false ] If the document is in the target.
#
# @since 3.0.0
def include?(doc)
return super unless _unloaded
_unloaded.where(_id: doc._id).exists? || _added.has_key?(doc._id)
end
# Inspection will just inspect the entries for nice array-style
# printing.
#
# @example Inspect the enumerable.
# enumerable.inspect
#
# @return [ String ] The inspected enum.
#
# @since 2.1.0
def inspect
entries.inspect
end
# Return all the documents in the enumerable that have been _loaded or
# _added.
#
# @note When passed a block it yields to each document.
#
# @example Get the in memory docs.
# enumerable.in_memory
#
# @return [ Array<Document> ] The in memory docs.
#
# @since 2.1.0
def in_memory
docs = (_loaded.values + _added.values)
docs.each do |doc|
yield(doc) if block_given?
end
end
# Get the last document in the enumerable. Will check the new
# documents first. Does not load the entire enumerable.
#
# @example Get the last document.
# enumerable.last
#
# @note Automatically adding a sort on _id when no other sort is
# defined on the criteria has the potential to cause bad performance issues.
# If you experience unexpected poor performance when using #first or #last,
# use the option { id_sort: :none }.
# Be aware that #first/#last won't guarantee order in this case.
#
# @param [ Hash ] opts The options for the query returning the first document.
#
# @option opts [ :none ] :id_sort Don't apply a sort on _id.
#
# @return [ Document ] The last document found.
#
# @since 2.1.0
def last(opts = {})
_added.values.try(:last) ||
_loaded.try(:values).try(:last) ||
_added[(ul = _unloaded.try(:last, opts)).try(:_id)] ||
ul
end
# Loads all the documents in the enumerable from the database.
#
# @example Load all the documents.
# enumerable.load_all!
#
# @return [ true ] That the enumerable is _loaded.
#
# @since 2.1.0
alias :load_all! :entries
# Has the enumerable been _loaded? This will be true if the criteria has
# been executed or we manually load the entire thing.
#
# @example Is the enumerable _loaded?
# enumerable._loaded?
#
# @return [ true, false ] If the enumerable has been _loaded.
#
# @since 2.1.0
def _loaded?
!!@executed
end
# Provides the data needed to Marshal.dump an enumerable proxy.
#
# @example Dump the proxy.
# Marshal.dump(proxy)
#
# @return [ Array<Object> ] The dumped data.
#
# @since 3.0.15
def marshal_dump
[_added, _loaded, _unloaded, @executed]
end
# Loads the data needed to Marshal.load an enumerable proxy.
#
# @example Load the proxy.
# Marshal.load(proxy)
#
# @return [ Array<Object> ] The dumped data.
#
# @since 3.0.15
def marshal_load(data)
@_added, @_loaded, @_unloaded, @executed = data
end
# Reset the enumerable back to its persisted state.
#
# @example Reset the enumerable.
# enumerable.reset
#
# @return [ false ] Always false.
#
# @since 2.1.0
def reset
_loaded.clear
_added.clear
@executed = false
end
# Resets the underlying unloaded criteria object with a new one. Used
# by HABTM associations to keep the underlying array in sync.
#
# @example Reset the unloaded documents.
# enumerable.reset_unloaded(criteria)
#
# @param [ Criteria ] criteria The criteria to replace with.
#
# @since 3.0.14
def reset_unloaded(criteria)
@_unloaded = criteria if _unloaded.is_a?(Criteria)
end
# Does this enumerable respond to the provided method?
#
# @example Does the enumerable respond to the method?
# enumerable.respond_to?(:sum)
#
# @param [ String, Symbol ] name The name of the method.
# @param [ true, false ] include_private Whether to include private
# methods.
#
# @return [ true, false ] Whether the enumerable responds.
#
# @since 2.1.0
def respond_to?(name, include_private = false)
[].respond_to?(name, include_private) || super
end
# Gets the total size of this enumerable. This is a combination of all
# the persisted and unpersisted documents.
#
# @example Get the size.
# enumerable.size
#
# @return [ Integer ] The size of the enumerable.
#
# @since 2.1.0
def size
count = (_unloaded ? _unloaded.count : _loaded.count)
if count.zero?
count + _added.count
else
count + _added.values.count { |d| d.new_record? }
end
end
alias :length :size
# Send #to_json to the entries.
#
# @example Get the enumerable as json.
# enumerable.to_json
#
# @param [ Hash ] options Optional parameters.
#
# @return [ String ] The entries all _loaded as a string.
#
# @since 2.2.0
def to_json(options = {})
entries.to_json(options)
end
# Send #as_json to the entries, without encoding.
#
# @example Get the enumerable as json.
# enumerable.as_json
#
# @param [ Hash ] options Optional parameters.
#
# @return [ Hash ] The entries all _loaded as a hash.
#
# @since 2.2.0
def as_json(options = {})
entries.as_json(options)
end
# Return all the unique documents in the enumerable.
#
# @note This operation loads all documents from the database.
#
# @example Get all the unique documents.
# enumerable.uniq
#
# @return [ Array<Document> ] The unique documents.
#
# @since 2.1.0
def uniq
entries.uniq
end
private
def set_base(document)
if @_association.is_a?(Referenced::HasMany)
document.set_relation(@_association.inverse, @_base) if @_association
end
end
ruby2_keywords def method_missing(name, *args, &block)
entries.send(name, *args, &block)
end
def unloaded_documents
if _unloaded.selector._mongoid_unsatisfiable_criteria?
[]
else
_unloaded
end
end
end
end
end
end
end
| 1 | 13,274 | Can this simply call `in_memory.empty?` ? | mongodb-mongoid | rb |
@@ -88,6 +88,13 @@ func init() {
// CreateSettingsFile creates the settings file (like settings.php) for the
// provided app if the apptype has a settingsCreator function.
func (app *DdevApp) CreateSettingsFile() (string, error) {
+ // If the user has asked us to skip settings file manipulation, then just bail
+ // out early.
+ if app.OmitSettingsPhp {
+ util.Warning("Skipping creation of settings file.")
+ return "", nil
+ }
+
app.SetApptypeSettingsPaths()
// If neither settings file options are set, then don't continue. Return | 1 | package ddevapp
import (
"fmt"
"os"
"path"
"path/filepath"
"github.com/drud/ddev/pkg/util"
)
type settingsCreator func(*DdevApp) (string, error)
type uploadDir func(*DdevApp) string
// hookDefaultComments should probably change its arg from string to app when
// config refactor is done.
type hookDefaultComments func() []byte
type apptypeSettingsPaths func(app *DdevApp)
// appTypeDetect returns true if the app is of the specified type
type appTypeDetect func(app *DdevApp) bool
// postImportDBAction can take actions after import (like warning user about
// required actions on Wordpress.
type postImportDBAction func(app *DdevApp) error
// configOverrideAction allows a particular apptype to override elements
// of the config for that apptype. Key example is drupal6 needing php56
type configOverrideAction func(app *DdevApp) error
// postConfigAction allows actions to take place at the end of ddev config
type postConfigAction func(app *DdevApp) error
// postStartAction allows actions to take place at the end of ddev start
type postStartAction func(app *DdevApp) error
// importFilesAction
type importFilesAction func(app *DdevApp, importPath, extPath string) error
// defaultWorkingDirMap returns the app type's default working directory map
type defaultWorkingDirMap func(app *DdevApp, defaults map[string]string) map[string]string
// AppTypeFuncs struct defines the functions that can be called (if populated)
// for a given appType.
type AppTypeFuncs struct {
settingsCreator
uploadDir
hookDefaultComments
apptypeSettingsPaths
appTypeDetect
postImportDBAction
configOverrideAction
postConfigAction
postStartAction
importFilesAction
defaultWorkingDirMap
}
// appTypeMatrix is a static map that defines the various functions to be called
// for each apptype (CMS).
var appTypeMatrix map[string]AppTypeFuncs
func init() {
appTypeMatrix = map[string]AppTypeFuncs{
AppTypePHP: {},
AppTypeDrupal6: {
settingsCreator: createDrupal6SettingsFile, uploadDir: getDrupalUploadDir, hookDefaultComments: getDrupal6Hooks, apptypeSettingsPaths: setDrupalSiteSettingsPaths, appTypeDetect: isDrupal6App, postImportDBAction: nil, configOverrideAction: drupal6ConfigOverrideAction, postConfigAction: nil, postStartAction: drupal6PostStartAction, importFilesAction: drupalImportFilesAction, defaultWorkingDirMap: docrootWorkingDir,
},
AppTypeDrupal7: {
settingsCreator: createDrupal7SettingsFile, uploadDir: getDrupalUploadDir, hookDefaultComments: getDrupal7Hooks, apptypeSettingsPaths: setDrupalSiteSettingsPaths, appTypeDetect: isDrupal7App, postImportDBAction: nil, configOverrideAction: nil, postConfigAction: nil, postStartAction: drupal7PostStartAction, importFilesAction: drupalImportFilesAction, defaultWorkingDirMap: docrootWorkingDir,
},
AppTypeDrupal8: {
settingsCreator: createDrupal8SettingsFile, uploadDir: getDrupalUploadDir, hookDefaultComments: getDrupal8Hooks, apptypeSettingsPaths: setDrupalSiteSettingsPaths, appTypeDetect: isDrupal8App, postImportDBAction: nil, configOverrideAction: nil, postConfigAction: nil, postStartAction: drupal8PostStartAction, importFilesAction: drupalImportFilesAction, defaultWorkingDirMap: docrootWorkingDir,
},
AppTypeWordPress: {
settingsCreator: createWordpressSettingsFile, uploadDir: getWordpressUploadDir, hookDefaultComments: getWordpressHooks, apptypeSettingsPaths: setWordpressSiteSettingsPaths, appTypeDetect: isWordpressApp, postImportDBAction: nil, configOverrideAction: nil, postConfigAction: nil, postStartAction: wordpressPostStartAction, importFilesAction: wordpressImportFilesAction,
},
AppTypeTYPO3: {
settingsCreator: createTypo3SettingsFile, uploadDir: getTypo3UploadDir, hookDefaultComments: getTypo3Hooks, apptypeSettingsPaths: setTypo3SiteSettingsPaths, appTypeDetect: isTypo3App, postImportDBAction: nil, configOverrideAction: typo3ConfigOverrideAction, postConfigAction: nil, postStartAction: typo3PostStartAction, importFilesAction: typo3ImportFilesAction,
},
AppTypeBackdrop: {
settingsCreator: createBackdropSettingsFile, uploadDir: getBackdropUploadDir, hookDefaultComments: getBackdropHooks, apptypeSettingsPaths: setBackdropSiteSettingsPaths, appTypeDetect: isBackdropApp, postImportDBAction: backdropPostImportDBAction, configOverrideAction: nil, postConfigAction: nil, postStartAction: backdropPostStartAction, importFilesAction: backdropImportFilesAction, defaultWorkingDirMap: docrootWorkingDir,
},
}
}
// CreateSettingsFile creates the settings file (like settings.php) for the
// provided app if the apptype has a settingsCreator function.
func (app *DdevApp) CreateSettingsFile() (string, error) {
app.SetApptypeSettingsPaths()
// If neither settings file options are set, then don't continue. Return
// a nil error because this should not halt execution if the apptype
// does not have a settings definition.
if app.SiteDdevSettingsFile == "" && app.SiteSettingsPath == "" {
util.Warning("Project type has no settings paths configured, so not creating settings file.")
return "", nil
}
// Drupal and WordPress love to change settings files to be unwriteable.
// Chmod them to something we can work with in the event that they already
// exist.
chmodTargets := []string{filepath.Dir(app.SiteSettingsPath), app.SiteDdevSettingsFile}
for _, fp := range chmodTargets {
fileInfo, err := os.Stat(fp)
if err != nil {
// We're not doing anything about this error other than warning,
// and will have to deal with the same check in settingsCreator.
if !os.IsNotExist(err) {
util.Warning("Unable to ensure write permissions: %v", err)
}
continue
}
perms := 0644
if fileInfo.IsDir() {
perms = 0755
}
err = os.Chmod(fp, os.FileMode(perms))
if err != nil {
return "", fmt.Errorf("could not change permissions on file %s to make it writeable: %v", fp, err)
}
}
// If we have a function to do the settings creation, do it, otherwise
// just ignore.
if appFuncs, ok := appTypeMatrix[app.GetType()]; ok && appFuncs.settingsCreator != nil {
settingsPath, err := appFuncs.settingsCreator(app)
if err != nil {
util.Warning("Unable to create settings file: %v", err)
}
if err = CreateGitIgnore(filepath.Dir(app.SiteSettingsPath), filepath.Base(app.SiteDdevSettingsFile), "drushrc.php"); err != nil {
util.Warning("Failed to write .gitignore in %s: %v", filepath.Dir(app.SiteDdevSettingsFile), err)
}
if app.Type == AppTypeDrupal8 {
drushDir := filepath.Join(filepath.Dir(app.SiteSettingsPath), "..", "all", "drush")
if err = CreateGitIgnore(drushDir, "drush.yml"); err != nil {
util.Warning("Failed to write .gitignore in %s: %v", drushDir, err)
}
}
return settingsPath, nil
}
return "", nil
}
// GetUploadDir returns the upload (public files) directory for the given app
func (app *DdevApp) GetUploadDir() string {
if appFuncs, ok := appTypeMatrix[app.GetType()]; ok && appFuncs.uploadDir != nil {
uploadDir := appFuncs.uploadDir(app)
return uploadDir
}
return ""
}
// GetHookDefaultComments gets the actual text of the config.yaml hook suggestions
// for a given apptype
func (app *DdevApp) GetHookDefaultComments() []byte {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.hookDefaultComments != nil {
suggestions := appFuncs.hookDefaultComments()
return suggestions
}
return []byte("")
}
// SetApptypeSettingsPaths chooses and sets the settings.php/settings.local.php
// and related paths for a given app.
func (app *DdevApp) SetApptypeSettingsPaths() {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.apptypeSettingsPaths != nil {
appFuncs.apptypeSettingsPaths(app)
}
}
// DetectAppType calls each apptype's detector until it finds a match,
// or returns 'php' as a last resort.
func (app *DdevApp) DetectAppType() string {
for appName, appFuncs := range appTypeMatrix {
if appFuncs.appTypeDetect != nil && appFuncs.appTypeDetect(app) {
return appName
}
}
return AppTypePHP
}
// PostImportDBAction calls each apptype's detector until it finds a match,
// or returns 'php' as a last resort.
func (app *DdevApp) PostImportDBAction() error {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.postImportDBAction != nil {
return appFuncs.postImportDBAction(app)
}
return nil
}
// ConfigFileOverrideAction gives a chance for an apptype to override any element
// of config.yaml that it needs to (on initial creation, but not after that)
func (app *DdevApp) ConfigFileOverrideAction() error {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.configOverrideAction != nil && !app.ConfigExists() {
return appFuncs.configOverrideAction(app)
}
return nil
}
// PostConfigAction gives a chance for an apptype to override do something at
// the end of ddev config.
func (app *DdevApp) PostConfigAction() error {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.postConfigAction != nil {
return appFuncs.postConfigAction(app)
}
return nil
}
// PostStartAction gives a chance for an apptype to do something after the app
// has been started.
func (app *DdevApp) PostStartAction() error {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.postStartAction != nil {
return appFuncs.postStartAction(app)
}
return nil
}
// ImportFilesAction executes the relevant import files workflow for each app type.
func (app *DdevApp) ImportFilesAction(importPath, extPath string) error {
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.importFilesAction != nil {
return appFuncs.importFilesAction(app, importPath, extPath)
}
return fmt.Errorf("this project type (%s) does not support import-files", app.Type)
}
// DefaultWorkingDirMap returns the app type's default working directory map.
func (app *DdevApp) DefaultWorkingDirMap() map[string]string {
// Default working directory values are defined here.
// Services working directories can be overridden by app types if needed.
defaults := map[string]string{
"web": "/var/www/html/",
"db": "/home",
"dba": "/home",
}
if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.defaultWorkingDirMap != nil {
return appFuncs.defaultWorkingDirMap(app, defaults)
}
return defaults
}
// docrootWorkingDir handles the shared case in which the web service working directory is the docroot.
func docrootWorkingDir(app *DdevApp, defaults map[string]string) map[string]string {
defaults["web"] = path.Join("/var/www/html", app.Docroot)
return defaults
}
| 1 | 13,763 | The styling/wording here probably needs some thought. It's more of a placeholder. | drud-ddev | php |
@@ -9,10 +9,10 @@ Rails.application.routes.draw do
resources :stack_entries
- resources :password_reset, only: [:new, :create] do
+ resources :password_resets, only: [:new, :create] do
collection do
get :confirm
- post :reset
+ patch :reset
end
end
resources :activation_resends, only: [:new, :create] | 1 | Rails.application.routes.draw do
ActiveAdmin.routes(self)
root to: 'home#index'
resources :sessions, only: [:new, :create] do
collection do
delete :destroy
end
end
resources :stack_entries
resources :password_reset, only: [:new, :create] do
collection do
get :confirm
post :reset
end
end
resources :activation_resends, only: [:new, :create]
resources :api_keys, only: :index
resources :domain_blacklists, except: :show
resources :reviews, only: :destroy do
resources :helpfuls, only: :create
end
resources :kudos, only: [:new, :create, :destroy]
resources :people, only: [:index]
resources :edits, only: [:update]
resources :licenses do
resources :edits, only: [:index]
end
resources :tags, only: [:index]
resources :accounts do
resources :api_keys, constraints: { format: :html }, except: :show
resources :projects, only: [:index]
resources :positions, only: [:index] do
collection do
get :one_click_create
end
end
resources :stacks, only: [:index]
resources :account_widgets, path: :widgets, as: :widgets, only: :index do
collection do
get :detailed
get :tiny
get :rank
end
end
resources :kudos, only: [:index, :show] do
collection do
get :sent
end
end
resources :edits, only: [:index]
resources :posts, only: [:index]
resources :reviews, only: [:index]
resources :positions
resources :position_factories, only: :create
member do
get :confirm_delete
get :disabled
get :settings
get :edit_privacy, to: 'privacy#edit', as: :edit_account_privacy
patch :edit_privacy, to: 'privacy#update', as: :account_privacy
end
collection do
get :unsubscribe_emails
end
resources :charts, only: [], module: :accounts do
collection do
get :commits_by_project
get :commits_by_language
end
end
resources :languages, only: :index, module: :accounts
resources :accesses, only: [], module: :accounts do
collection do
post :make_spammer
get :activate
end
end
end
resources :deleted_accounts, only: [:edit, :update]
resources :check_availabilities, only: [] do
collection do
get :account
get :project
get :organization
get :license
end
end
resources :searches, only: [] do
collection do
get :account
end
end
resources :autocompletes, only: [] do
collection do
get :account
get :project
get :licenses
get :contributions
get :tags
end
end
resources :forums do
resources :topics, shallow: true
end
resources :topics, except: [:index, :new, :create] do
resources :posts, except: [:new]
end
resources :posts, only: :index, as: 'all_posts'
get 'markdown_syntax', to: 'abouts#markdown_syntax'
get 'message', to: 'abouts#message'
get 'maintenance', to: 'abouts#maintenance'
get 'tools', to: 'abouts#tools'
get 'explore/projects', to: 'explore#projects', as: :explore_projects
get 'p/compare', to: 'compare#projects', as: :compare_projects
get 'p/graph', to: 'compare#projects_graph', as: :compare_graph_projects
resources :projects, path: :p, except: [:destroy] do
member do
get :users
get :map
get :settings
get :estimated_cost
get :similar_by_tags
get 'permissions' => 'permissions#show', as: :permissions
put 'permissions' => 'permissions#update', as: :update_permissions
post 'rate' => 'ratings#rate', as: :rate
delete 'unrate' => 'ratings#unrate', as: :unrate
end
collection do
post :check_forge
end
resources :contributions, path: :contributors, as: :contributors, only: [:index, :show] do
resources :commits
collection do
get :near
get :summary
end
member do
get :commits_compound_spark
get :commits_spark
end
end
resources :licenses, controller: :project_licenses, only: [:index, :new, :create, :destroy]
resources :tags, controller: :project_tags, only: [:index, :create, :destroy] do
collection do
get :related
get :status
end
end
resources :duplicates, only: [:new, :create, :edit, :update, :destroy]
resource :logos, only: [:new, :create, :destroy]
resources :links, except: :show
resources :managers, only: [:index, :new, :create, :edit, :update] do
member do
post :approve
post :reject
end
end
resources :manages, only: [:new]
resources :edits, only: [:index]
resources :enlistments
resources :factoids, only: [:index]
resources :rss_articles, only: :index
resources :project_widgets, path: :widgets, as: :widgets, only: :index do
collection do
get :factoids
get :factoids_stats
get :basic_stats
get :users
get :users_logo
get :search_code
get :browse_code
get :search_all_code
get :languages
get :partner_badge
get :thin_badge
get :cocomo
end
end
resources :similar_projects, only: :index
resources :ratings
resources :reviews, except: :show do
collection { get :summary }
resources :helpfuls, only: :create
end
resources :analyses, only: [:index, :show] do
member do
get :languages_summary
get :languages
get :licenses
get :top_commit_volume_chart
get :commits_history
get :committer_history
get :contributor_summary
get :language_history
get :code_history
get :lines_of_code
get :commits_spark
end
resources :activity_facts, only: :index
resources :size_facts, only: :index
end
resources :commits, only: [:index, :show] do
collection { get :summary }
member do
get :statistics
get :events
get :event_details
end
end
resources :contributors do
member do
get :event_details
get :events
end
end
resources :stacks, only: [] do
collection { get :near }
end
resources :aliases, only: [:index, :new, :create] do
collection { get :preferred_names }
member do
post :undo
post :redo
end
end
end
resources :organizations, path: :orgs, only: [:index, :show] do
member do
get :settings
get :projects
get :outside_projects
get :outside_committers
get :print_infographic
get :affiliated_committers
end
resources :edits, only: [:index]
resource :logos, only: [:new, :create, :destroy]
resources :managers, only: [:index, :new, :create, :edit, :update] do
member do
post :approve
post :reject
end
end
resources :organization_widgets, path: :widgets, as: :widgets, only: :index do
collection do
get :affiliated_committers_activity
get :open_source_activity
get :portfolio_projects_activity
end
end
end
resources :stacks, only: [:show, :create, :update, :destroy] do
member do
get :similar
get :builder
end
resources :stack_entries, only: [:create, :destroy]
resources :stack_ignores, only: [:create] do
collection do
delete :delete_all
end
end
resources :stack_widgets, path: :widgets, as: :widgets, only: :index do
collection do
get :normal
end
end
end
resources :languages, only: [:show, :index] do
collection { get :compare }
end
resources :people do
collection { get :rankings }
end
resources :contributors, controller: 'contributions' do
resources :invites, only: [:new, :create]
end
get 'explore/orgs' => 'explore#orgs'
get 'explore/orgs_by_thirty_day_commit_volume' => 'explore#orgs_by_thirty_day_commit_volume'
get 'message' => 'home#message'
get 'maintenance' => 'home#maintenance'
get 'repositories/compare' => 'compare_repositories#index', as: :compare_repositories
get 'repositories/chart' => 'compare_repositories#chart', as: :compare_repositories_chart
get 'server_info' => 'home#server_info'
resources :committers, only: [:index, :show] do
member do
post :claim
post :save_claim
end
end
end
| 1 | 7,459 | Using a plural route helps in detecting the path automatically for `= form_for @password_reset`. | blackducksoftware-ohloh-ui | rb |
@@ -9,8 +9,8 @@ const fs = require('fs');
const { MongoClient } = require('../../../src');
const { TestConfiguration } = require('./config');
const { getEnvironmentalOptions } = require('../utils');
-const { eachAsync } = require('../../../src/utils');
const mock = require('../mongodb-mock/index');
+const { inspect } = require('util');
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017';
const MONGODB_API_VERSION = process.env.MONGODB_API_VERSION; | 1 | 'use strict';
require('source-map-support').install({
hookRequire: true
});
const path = require('path');
const fs = require('fs');
const { MongoClient } = require('../../../src');
const { TestConfiguration } = require('./config');
const { getEnvironmentalOptions } = require('../utils');
const { eachAsync } = require('../../../src/utils');
const mock = require('../mongodb-mock/index');
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017';
const MONGODB_API_VERSION = process.env.MONGODB_API_VERSION;
// Load balancer fronting 1 mongos.
const SINGLE_MONGOS_LB_URI = process.env.SINGLE_MONGOS_LB_URI;
// Load balancer fronting 2 mongoses.
const MULTI_MONGOS_LB_URI = process.env.MULTI_MONGOS_LB_URI;
const filters = [];
function initializeFilters(client, callback) {
const filterFiles = fs
.readdirSync(path.join(__dirname, 'filters'))
.filter(x => x.indexOf('js') !== -1);
// context object that can be appended to as part of filter initialization
const context = {};
eachAsync(
filterFiles,
(filterName, cb) => {
const FilterModule = require(path.join(__dirname, 'filters', filterName));
const filter = new FilterModule();
if (typeof filter !== 'object') {
cb(new TypeError('Type of filter must be an object'));
return;
}
if (!filter.filter || typeof filter.filter !== 'function') {
cb(new TypeError('Object filters must have a function named filter'));
return;
}
filters.push(filter);
if (typeof filter.initializeFilter === 'function') {
filter.initializeFilter(client, context, cb);
} else {
cb();
}
},
err => callback(err, context)
);
}
function filterOutTests(suite) {
suite.tests = suite.tests.filter(test => filters.every(f => f.filter(test)));
suite.suites.forEach(suite => filterOutTests(suite));
}
before(function (_done) {
// NOTE: if we first parse the connection string and redact auth, then we can reenable this
// const usingUnifiedTopology = !!process.env.MONGODB_UNIFIED_TOPOLOGY;
// console.log(
// `connecting to: ${chalk.bold(MONGODB_URI)} using ${chalk.bold(
// usingUnifiedTopology ? 'unified' : 'legacy'
// )} topology`
// );
const loadBalanced = SINGLE_MONGOS_LB_URI && MULTI_MONGOS_LB_URI;
const client = new MongoClient(
loadBalanced ? SINGLE_MONGOS_LB_URI : MONGODB_URI,
getEnvironmentalOptions()
);
const done = err => client.close(err2 => _done(err || err2));
client.connect(err => {
if (err) {
done(err);
return;
}
initializeFilters(client, (err, context) => {
if (err) {
done(err);
return;
}
// Ensure test MongoClients set a serverApi parameter when required
if (MONGODB_API_VERSION) {
context.serverApi = MONGODB_API_VERSION;
}
if (SINGLE_MONGOS_LB_URI && MULTI_MONGOS_LB_URI) {
context.singleMongosLoadBalancerUri = SINGLE_MONGOS_LB_URI;
context.multiMongosLoadBalancerUri = MULTI_MONGOS_LB_URI;
}
// replace this when mocha supports dynamic skipping with `afterEach`
filterOutTests(this._runnable.parent);
this.configuration = new TestConfiguration(MONGODB_URI, context);
done();
});
});
});
// ensure all mock connections are closed after the suite is run
after(() => mock.cleanup());
// optionally enable test runner-wide plugins
require('./plugins/deferred');
require('./plugins/session_leak_checker');
require('./plugins/client_leak_checker');
// configure mocha
require('mocha-sinon');
| 1 | 21,822 | Is `metadata` required on all tests? I actually just removed the metadata field entirely from a few tests in my PR. | mongodb-node-mongodb-native | js |
@@ -191,7 +191,7 @@ public class FileHandler {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
- throw new IOException("Could not transfer all bytes.");
+ throw new IOException("Could not transfer all bytes of " + from.toPath());
}
}
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.io;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import org.openqa.selenium.Platform;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.util.List;
/**
* Utility methods for common filesystem activities
*/
public class FileHandler {
public static File unzip(InputStream resource) throws IOException {
File output = TemporaryFilesystem.getDefaultTmpFS().createTempDir("unzip", "stream");
new Zip().unzip(resource, output);
return output;
}
public static void copyResource(File outputDir, Class<?> forClassLoader, String... names)
throws IOException {
Zip zip = new Zip();
for (String name : names) {
InputStream is = locateResource(forClassLoader, name);
try {
zip.unzipFile(outputDir, is, name);
} finally {
is.close();
}
}
}
private static InputStream locateResource(Class<?> forClassLoader, String name)
throws IOException {
String arch = System.getProperty("os.arch").toLowerCase() + "/";
List<String> alternatives =
Lists.newArrayList(name, "/" + name, arch + name, "/" + arch + name);
if (Platform.getCurrent().is(Platform.MAC)) {
alternatives.add("mac/" + name);
alternatives.add("/mac/" + name);
}
// First look using our own classloader
for (String possibility : alternatives) {
InputStream stream = FileHandler.class.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
stream = forClassLoader.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
}
throw new IOException("Unable to locate: " + name);
}
public static boolean createDir(File dir) throws IOException {
if ((dir.exists() || dir.mkdirs()) && dir.canWrite())
return true;
if (dir.exists()) {
FileHandler.makeWritable(dir);
return dir.canWrite();
}
// Iterate through the parent directories until we find that exists,
// then sink down.
return createDir(dir.getParentFile());
}
public static boolean makeWritable(File file) throws IOException {
if (file.canWrite()) {
return true;
}
return file.setWritable(true);
}
public static boolean makeExecutable(File file) throws IOException {
if (canExecute(file)) {
return true;
}
return file.setExecutable(true);
}
public static Boolean canExecute(File file) {
return file.canExecute();
}
public static boolean isZipped(String fileName) {
return fileName.endsWith(".zip") || fileName.endsWith(".xpi");
}
public static boolean delete(File toDelete) {
boolean deleted = true;
if (toDelete.isDirectory()) {
File[] children = toDelete.listFiles();
if (children != null) {
for (File child : children) {
deleted &= child.canWrite() && delete(child);
}
}
}
return deleted && toDelete.canWrite() && toDelete.delete();
}
public static void copy(File from, File to) throws IOException {
copy(from, to, new NoFilter());
}
public static void copy(File source, File dest, String suffix) throws IOException {
copy(source, dest, suffix == null ? new NoFilter() : new FileSuffixFilter(suffix));
}
private static void copy(File source, File dest, Filter onlyCopy) throws IOException {
if (!source.exists()) {
return;
}
if (source.isDirectory()) {
copyDir(source, dest, onlyCopy);
} else {
copyFile(source, dest, onlyCopy);
}
}
private static void copyDir(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
// Create the target directory.
createDir(to);
// List children.
String[] children = from.list();
if (children == null) {
throw new IOException("Could not copy directory " + from.getPath());
}
for (String child : children) {
if (!".parentlock".equals(child) && !"parent.lock".equals(child)) {
copy(new File(from, child), new File(to, child), onlyCopy);
}
}
}
private static void copyFile(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
try (OutputStream out = new FileOutputStream(to)) {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
throw new IOException("Could not transfer all bytes.");
}
}
}
/**
* Used by file operations to determine whether or not to make use of a file.
*/
public interface Filter {
/**
* @param file File to be considered.
* @return Whether or not to make use of the file in this operation.
*/
boolean isRequired(File file);
}
private static class FileSuffixFilter implements Filter {
private final String suffix;
public FileSuffixFilter(String suffix) {
this.suffix = suffix;
}
public boolean isRequired(File file) {
return file.isDirectory() || file.getAbsolutePath().endsWith(suffix);
}
}
private static class NoFilter implements Filter {
public boolean isRequired(File file) {
return true;
}
}
public static String readAsString(File toRead) throws IOException {
Reader reader = null;
try {
reader = new BufferedReader(new FileReader(toRead));
StringBuilder builder = new StringBuilder();
char[] buffer = new char[4096];
int read;
while ((read = reader.read(buffer)) != -1) {
char[] target = new char[read];
System.arraycopy(buffer, 0, target, 0, read);
builder.append(target);
}
return builder.toString();
} finally {
Closeables.close(reader, false);
}
}
}
| 1 | 13,214 | seems reasonable to also want to include the 'to' location? | SeleniumHQ-selenium | js |
@@ -145,6 +145,10 @@ public class TemporaryFilesystem {
}
public boolean deleteBaseDir() {
- return baseDir.delete();
+ boolean wasDeleted = baseDir.delete();
+ if (wasDeleted) {
+ Runtime.getRuntime().removeShutdownHook(shutdownHook);
+ }
+ return wasDeleted;
}
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.io;
import org.openqa.selenium.WebDriverException;
import java.io.File;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
/**
* A wrapper around temporary filesystem behaviour.
*/
public class TemporaryFilesystem {
private final Set<File> temporaryFiles = new CopyOnWriteArraySet<File>();
private final File baseDir;
private final Thread shutdownHook = new Thread() { // Thread safety reviewed
@Override
public void run() {
deleteTemporaryFiles();
}
};
private static File sysTemp = new File(System.getProperty("java.io.tmpdir"));
private static TemporaryFilesystem instance = new TemporaryFilesystem(sysTemp);
public static TemporaryFilesystem getDefaultTmpFS() {
return instance;
}
public static void setTemporaryDirectory(File directory) {
synchronized (TemporaryFilesystem.class) {
instance = new TemporaryFilesystem(directory);
}
}
public static TemporaryFilesystem getTmpFsBasedOn(File directory) {
return new TemporaryFilesystem(directory);
}
private TemporaryFilesystem(File baseDir) {
this.baseDir = baseDir;
Runtime.getRuntime().addShutdownHook(shutdownHook);
if (!baseDir.exists()) {
throw new WebDriverException("Unable to find tmp dir: " + baseDir.getAbsolutePath());
}
if (!baseDir.canWrite()) {
throw new WebDriverException("Unable to write to tmp dir: " + baseDir.getAbsolutePath());
}
}
/**
* Create a temporary directory, and track it for deletion.
*
* @param prefix the prefix to use when creating the temporary directory
* @param suffix the suffix to use when creating the temporary directory
* @return the temporary directory to create
*/
public File createTempDir(String prefix, String suffix) {
try {
// Create a tempfile, and delete it.
File file = File.createTempFile(prefix, suffix, baseDir);
file.delete();
// Create it as a directory.
File dir = new File(file.getAbsolutePath());
if (!dir.mkdirs()) {
throw new WebDriverException("Cannot create profile directory at " + dir.getAbsolutePath());
}
// Create the directory and mark it writable.
FileHandler.createDir(dir);
temporaryFiles.add(dir);
return dir;
} catch (IOException e) {
throw new WebDriverException(
"Unable to create temporary file at " + baseDir.getAbsolutePath());
}
}
/**
* Delete a temporary directory that we were responsible for creating.
*
* @param file the file to delete
* @throws WebDriverException if interrupted
*/
public void deleteTempDir(File file) {
if (!shouldReap()) {
return;
}
// If the tempfile can be removed, delete it. If not, it wasn't created by us.
if (temporaryFiles.remove(file)) {
FileHandler.delete(file);
}
}
/**
* Perform the operation that a shutdown hook would have.
*/
public void deleteTemporaryFiles() {
if (!shouldReap()) {
return;
}
for (File file : temporaryFiles) {
try {
FileHandler.delete(file);
} catch (WebDriverException e) {
// ignore; an interrupt will already have been logged.
}
}
}
/**
* Returns true if we should be reaping profiles. Used to control tempfile deletion.
*
* @return true if reaping is enabled.
*/
boolean shouldReap() {
String reap = System.getProperty("webdriver.reap_profile", "true");
return Boolean.valueOf(reap);
}
public boolean deleteBaseDir() {
return baseDir.delete();
}
}
| 1 | 13,021 | I don't think we need to necessarily check if that returned true or not, we should just remove the shutdown hook. Since nothing would check or do anything with this flag anyways. | SeleniumHQ-selenium | rb |
@@ -232,7 +232,17 @@ func (c *CVCController) updateCVCObj(
// 4. Create cstorvolumeclaim resource.
// 5. Update the cstorvolumeclaim with claimRef info and bound with cstorvolume.
func (c *CVCController) createVolumeOperation(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumeClaim, error) {
- _ = cvc.Annotations[string(apis.ConfigClassKey)]
+
+ policyName := cvc.Annotations[string(apis.VolumePolicyKey)]
+ volumePolicy := &apis.CStorVolumePolicy{}
+ var err error
+ if policyName != "" {
+ klog.Infof("uses cstorvolume policy for volume configuration")
+ volumePolicy, err = c.clientset.OpenebsV1alpha1().CStorVolumePolicies(getNamespace()).Get(policyName, metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+ }
klog.V(2).Infof("creating cstorvolume service resource")
svcObj, err := getOrCreateTargetService(cvc) | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cstorvolumeclaim
import (
"encoding/json"
"fmt"
"time"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
merrors "github.com/pkg/errors"
"k8s.io/klog"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/util/slice"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a
// cstorvolumeclaim is synced
SuccessSynced = "Synced"
// Provisioning is used as part of the Event 'reason' when a
// cstorvolumeclaim is in provisioning stage
Provisioning = "Provisioning"
// ErrResourceExists is used as part of the Event 'reason' when a
// cstorvolumeclaim fails to sync due to a cstorvolumeclaim of the same
// name already existing.
ErrResourceExists = "ErrResourceExists"
// MessageResourceExists is the message used for Events when a resource
// fails to sync due to a cstorvolumeclaim already existing
MessageResourceExists = "Resource %q already exists and is not managed by CVC"
// MessageResourceSynced is the message used for an Event fired when a
// cstorvolumeclaim is synced successfully
MessageResourceSynced = "cstorvolumeclaim synced successfully"
// MessageResourceCreated msg used for cstor volume provisioning success event
MessageResourceCreated = "cstorvolumeclaim created successfully"
// MessageCVCPublished msg used for cstor volume provisioning publish events
MessageCVCPublished = "cstorvolumeclaim %q must be published/attached on node"
// CStorVolumeClaimFinalizer name of finalizer on CStorVolumeClaim that
// are bound by CStorVolume
CStorVolumeClaimFinalizer = "cvc.openebs.io/finalizer"
)
var knownResizeConditions = map[apis.CStorVolumeClaimConditionType]bool{
apis.CStorVolumeClaimResizing: true,
apis.CStorVolumeClaimResizePending: true,
}
// Patch struct represent the struct used to patch
// the cstorvolumeclaim object
type Patch struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the CStorVolumeClaim resource
// with the current status of the resource.
func (c *CVCController) syncHandler(key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing cstorvolumeclaim %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing cstorvolumeclaim %q (%v)", key, time.Since(startTime))
}()
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the cvc resource with this namespace/name
cvc, err := c.cvcLister.CStorVolumeClaims(namespace).Get(name)
if k8serror.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("cstorvolumeclaim '%s' has been deleted", key))
return nil
}
if err != nil {
return err
}
cvcCopy := cvc.DeepCopy()
err = c.syncCVC(cvcCopy)
return err
}
// enqueueCVC takes a CVC resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than CStorVolumeClaims.
func (c *CVCController) enqueueCVC(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
runtime.HandleError(err)
return
}
c.workqueue.Add(key)
/* if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if cvc, ok := obj.(*apis.CStorVolumeClaim); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(cvc)
if err != nil {
klog.Errorf("failed to get key from object: %v, %v", err, cvc)
return
}
klog.V(5).Infof("enqueued %q for sync", objName)
c.workqueue.Add(objName)
}
*/
}
// syncCVC is the function which tries to converge to a desired state for the
// CStorVolumeClaims
func (c *CVCController) syncCVC(cvc *apis.CStorVolumeClaim) error {
var err error
// CStor Volume Claim should be deleted. Check if deletion timestamp is set
// and remove finalizer.
if c.isClaimDeletionCandidate(cvc) {
klog.Infof("syncClaim: remove finalizer for CStorVolumeClaimVolume [%s]", cvc.Name)
return c.removeClaimFinalizer(cvc)
}
volName := cvc.Name
if volName == "" {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
runtime.HandleError(fmt.Errorf("%+v: cvc name must be specified", cvc))
return nil
}
nodeID := cvc.Publish.NodeID
if nodeID == "" {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
runtime.HandleError(fmt.Errorf("cvc must be publish/attached to Node: %+v", cvc))
c.recorder.Event(cvc, corev1.EventTypeWarning,
Provisioning,
fmt.Sprintf(MessageCVCPublished, cvc.Name),
)
return nil
}
if cvc.Status.Phase == apis.CStorVolumeClaimPhasePending {
klog.V(2).Infof("provisioning cstor volume %+v", cvc)
_, err = c.createVolumeOperation(cvc)
if err != nil {
//Record an event to indicate that any provisioning operation is failed.
c.recorder.Eventf(cvc, corev1.EventTypeWarning, Provisioning, err.Error())
}
}
// If an error occurs during Get/Create, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// temporary network failure, or any other transient reason.
if err != nil {
return err
}
if c.cvcNeedResize(cvc) {
err = c.resizeCVC(cvc)
}
// If an error occurs during Get/Create, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// temporary network failure, or any other transient reason.
if err != nil {
return err
}
return nil
}
// UpdateCVCObj updates the cstorvolumeclaim object resource to reflect the
// current state of the world
func (c *CVCController) updateCVCObj(
cvc *apis.CStorVolumeClaim,
cv *apis.CStorVolume,
) error {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
cvcCopy := cvc.DeepCopy()
if cvc.Name != cv.Name {
return fmt.
Errorf("could not bind cstorvolumeclaim %s and cstorvolume %s, name does not match",
cvc.Name,
cv.Name)
}
_, err := c.clientset.OpenebsV1alpha1().CStorVolumeClaims(cvc.Namespace).Update(cvcCopy)
if err == nil {
c.recorder.Event(cvc, corev1.EventTypeNormal,
SuccessSynced,
MessageResourceCreated,
)
}
return err
}
// createVolumeOperation triggers all the required resource create operations.
// 1. Create volume service.
// 2. Create cstorvolume resource with required iscsi information.
// 3. Create target deployment.
// 4. Create cstorvolumeclaim resource.
// 5. Update the cstorvolumeclaim with claimRef info and bound with cstorvolume.
func (c *CVCController) createVolumeOperation(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumeClaim, error) {
_ = cvc.Annotations[string(apis.ConfigClassKey)]
klog.V(2).Infof("creating cstorvolume service resource")
svcObj, err := getOrCreateTargetService(cvc)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume resource")
cvObj, err := getOrCreateCStorVolumeResource(svcObj, cvc)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume target deployment")
_, err = getOrCreateCStorTargetDeployment(cvObj)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume replica resource")
err = c.distributePendingCVRs(cvc, cvObj, svcObj)
if err != nil {
return nil, err
}
volumeRef, err := ref.GetReference(scheme.Scheme, cvObj)
if err != nil {
return nil, err
}
// update the cstorvolume reference, phase as "Bound" and desired
// capacity
cvc.Spec.CStorVolumeRef = volumeRef
cvc.Status.Phase = apis.CStorVolumeClaimPhaseBound
cvc.Status.Capacity = cvc.Spec.Capacity
err = c.updateCVCObj(cvc, cvObj)
if err != nil {
return nil, err
}
return cvc, nil
}
// distributePendingCVRs triggers creation and distribution of pending
// cstorvolumereplica resources among the available cstor pools
func (c *CVCController) distributePendingCVRs(
cvc *apis.CStorVolumeClaim,
cv *apis.CStorVolume,
service *corev1.Service,
) error {
pendingReplicaCount, err := c.getPendingCVRCount(cvc)
if err != nil {
return err
}
err = distributeCVRs(pendingReplicaCount, cvc, service, cv)
if err != nil {
return err
}
return nil
}
// isClaimDeletionCandidate checks if a cstorvolumeclaim is a deletion candidate.
func (c *CVCController) isClaimDeletionCandidate(cvc *apis.CStorVolumeClaim) bool {
return cvc.ObjectMeta.DeletionTimestamp != nil &&
slice.ContainsString(cvc.ObjectMeta.Finalizers, CStorVolumeClaimFinalizer, nil)
}
// removeFinalizer removes finalizers present in CStorVolumeClaim resource
// TODO Avoid removing clone finalizer
func (c *CVCController) removeClaimFinalizer(
cvc *apis.CStorVolumeClaim,
) error {
cvcPatch := []Patch{
Patch{
Op: "remove",
Path: "/metadata/finalizers",
},
}
cvcPatchBytes, err := json.Marshal(cvcPatch)
if err != nil {
return merrors.Wrapf(
err,
"failed to remove finalizers from cstorvolumeclaim {%s}",
cvc.Name,
)
}
_, err = c.clientset.
OpenebsV1alpha1().
CStorVolumeClaims(cvc.Namespace).
Patch(cvc.Name, types.JSONPatchType, cvcPatchBytes)
if err != nil {
return merrors.Wrapf(
err,
"failed to remove finalizers from cstorvolumeclaim {%s}",
cvc.Name,
)
}
klog.Infof("finalizers removed successfully from cstorvolumeclaim {%s}", cvc.Name)
return nil
}
// getPendingCVRCount gets the pending replica count to be created
// in case of any failures
func (c *CVCController) getPendingCVRCount(
cvc *apis.CStorVolumeClaim,
) (int, error) {
currentReplicaCount, err := c.getCurrentReplicaCount(cvc)
if err != nil {
runtime.HandleError(err)
return 0, err
}
return cvc.Spec.ReplicaCount - currentReplicaCount, nil
}
// getCurrentReplicaCount give the current cstorvolumereplicas count for the
// given volume.
func (c *CVCController) getCurrentReplicaCount(cvc *apis.CStorVolumeClaim) (int, error) {
// TODO use lister
// CVRs, err := c.cvrLister.CStorVolumeReplicas(cvc.Namespace).
// List(klabels.Set(pvLabel).AsSelector())
pvLabel := pvSelector + "=" + cvc.Name
cvrList, err := c.clientset.
OpenebsV1alpha1().
CStorVolumeReplicas(cvc.Namespace).
List(metav1.ListOptions{LabelSelector: pvLabel})
if err != nil {
return 0, merrors.Errorf("unable to get current replica count: %v", err)
}
return len(cvrList.Items), nil
}
// IsCVRPending looks for pending cstorvolume replicas compared to the desired
// replica count. It returns true if the counts don't match.
func (c *CVCController) IsCVRPending(cvc *apis.CStorVolumeClaim) (bool, error) {
selector := klabels.SelectorFromSet(BaseLabels(cvc))
CVRs, err := c.cvrLister.CStorVolumeReplicas(cvc.Namespace).
List(selector)
if err != nil {
return false, merrors.Errorf("failed to list cvr : %v", err)
}
// TODO: check for greater values
return cvc.Spec.ReplicaCount != len(CVRs), nil
}
// BaseLabels returns the base labels we apply to cstorvolumereplicas created
func BaseLabels(cvc *apis.CStorVolumeClaim) map[string]string {
base := map[string]string{
pvSelector: cvc.Name,
}
return base
}
// cvcNeedResize returns true if a cvc desires a resize operation.
func (c *CVCController) cvcNeedResize(cvc *apis.CStorVolumeClaim) bool {
desiredCVCSize := cvc.Spec.Capacity[corev1.ResourceStorage]
actualCVCSize := cvc.Status.Capacity[corev1.ResourceStorage]
return desiredCVCSize.Cmp(actualCVCSize) > 0
}
// resizeCVC will:
// 1. Mark cvc as resizing.
// 2. Resize the cstorvolume object.
// 3. Mark cvc as resizing finished
func (c *CVCController) resizeCVC(cvc *apis.CStorVolumeClaim) error {
var updatedCVC *apis.CStorVolumeClaim
var err error
cv, err := c.clientset.OpenebsV1alpha1().CStorVolumes(cvc.Namespace).
Get(cvc.Name, metav1.GetOptions{})
if err != nil {
runtime.HandleError(fmt.Errorf("failed to get cv %s: %v", cvc.Name, err))
return err
}
desiredCVCSize := cvc.Spec.Capacity[corev1.ResourceStorage]
if (cv.Spec.Capacity).Cmp(cv.Status.Capacity) > 0 {
c.recorder.Event(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizing),
fmt.Sprintf("Resize already in progress %s", cvc.Name))
klog.Warningf("Resize already in progress on %q from: %v to: %v",
cvc.Name, cv.Status.Capacity.String(), cv.Spec.Capacity.String())
return nil
}
// mark CVC as resize finished
if desiredCVCSize.Cmp(cv.Status.Capacity) == 0 {
// Resize volume succeeded mark it as resizing finished.
return c.markCVCResizeFinished(cvc)
}
//if desiredCVCSize.Cmp(cv.Spec.Capacity) > 0 {
if updatedCVC, err = c.markCVCResizeInProgress(cvc); err != nil {
klog.Errorf("failed to mark cvc %q as resizing: %v", cvc.Name, err)
return err
}
cvc = updatedCVC
// Record an event to indicate that cvc-controller is resizing this volume.
c.recorder.Event(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizing),
fmt.Sprintf("CVCController is resizing volume %s", cvc.Name))
err = c.resizeCV(cv, desiredCVCSize)
if err != nil {
// Record an event to indicate that resize operation is failed.
c.recorder.Eventf(cvc, corev1.EventTypeWarning, string(apis.CStorVolumeClaimResizeFailed), err.Error())
return err
}
return nil
}
func (c *CVCController) markCVCResizeInProgress(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumeClaim, error) {
// Mark CVC as Resize Started
progressCondition := apis.CStorVolumeClaimCondition{
Type: apis.CStorVolumeClaimResizing,
LastTransitionTime: metav1.Now(),
}
newCVC := cvc.DeepCopy()
newCVC.Status.Conditions = MergeResizeConditionsOfCVC(newCVC.Status.Conditions,
[]apis.CStorVolumeClaimCondition{progressCondition})
return c.PatchCVCStatus(cvc, newCVC)
}
type resizeProcessStatus struct {
condition apis.CStorVolumeClaimCondition
processed bool
}
// MergeResizeConditionsOfCVC updates cvc with desired resize conditions
// leaving other conditions untouched.
func MergeResizeConditionsOfCVC(oldConditions, resizeConditions []apis.CStorVolumeClaimCondition) []apis.CStorVolumeClaimCondition {
resizeConditionMap := map[apis.CStorVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
newConditions := []apis.CStorVolumeClaimCondition{}
for _, condition := range oldConditions {
// If Condition is of not resize type, we keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
newConditions = append(newConditions, newCondition.condition)
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
return newConditions
}
func (c *CVCController) markCVCResizeFinished(cvc *apis.CStorVolumeClaim) error {
newCVC := cvc.DeepCopy()
newCVC.Status.Capacity = cvc.Spec.Capacity
newCVC.Status.Conditions = MergeResizeConditionsOfCVC(cvc.Status.Conditions, []apis.CStorVolumeClaimCondition{})
_, err := c.PatchCVCStatus(cvc, newCVC)
if err != nil {
klog.Errorf("Mark CVC %q as resize finished failed: %v", cvc.Name, err)
return err
}
klog.V(4).Infof("Resize CVC %q finished", cvc.Name)
c.recorder.Eventf(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizeSuccess), "Resize volume succeeded")
return nil
}
// PatchCVCStatus updates CVC status using patch api
func (c *CVCController) PatchCVCStatus(oldCVC,
newCVC *apis.CStorVolumeClaim,
) (*apis.CStorVolumeClaim, error) {
patchBytes, err := getPatchData(oldCVC, newCVC)
if err != nil {
return nil, fmt.Errorf("can't patch status of CVC %s as generate patch data failed: %v", oldCVC.Name, err)
}
updatedClaim, updateErr := c.clientset.OpenebsV1alpha1().CStorVolumeClaims(oldCVC.Namespace).
Patch(oldCVC.Name, types.MergePatchType, patchBytes)
if updateErr != nil {
return nil, fmt.Errorf("can't patch status of CVC %s with %v", oldCVC.Name, updateErr)
}
return updatedClaim, nil
}
func getPatchData(oldObj, newObj interface{}) ([]byte, error) {
oldData, err := json.Marshal(oldObj)
if err != nil {
return nil, fmt.Errorf("marshal old object failed: %v", err)
}
newData, err := json.Marshal(newObj)
if err != nil {
return nil, fmt.Errorf("marshal new object failed: %v", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldObj)
if err != nil {
return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err)
}
return patchBytes, nil
}
// resizeCV resize the cstor volume to desired size, and update CV's capacity
func (c *CVCController) resizeCV(cv *apis.CStorVolume, newCapacity resource.Quantity) error {
newCV := cv.DeepCopy()
newCV.Spec.Capacity = newCapacity
patchBytes, err := getPatchData(cv, newCV)
if err != nil {
return fmt.Errorf("can't update capacity of CV %s as generate patch data failed: %v", cv.Name, err)
}
_, updateErr := c.clientset.OpenebsV1alpha1().CStorVolumes(getNamespace()).
Patch(cv.Name, types.MergePatchType, patchBytes)
if updateErr != nil {
return updateErr
}
return nil
}
| 1 | 17,732 | Better to push getting policy details into a func we can reuse later. | openebs-maya | go |
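A minimal sketch of the refactor suggested in the openebs-maya review above: pull the policy lookup out of createVolumeOperation into a method that can be reused later. The helper name getVolumePolicy is hypothetical and not part of the actual change; it only reuses identifiers already visible in the patch (apis.VolumePolicyKey, CStorVolumePolicies, getNamespace).

func (c *CVCController) getVolumePolicy(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumePolicy, error) {
	// Look up the policy named in the claim's annotations, if any.
	policyName := cvc.Annotations[string(apis.VolumePolicyKey)]
	if policyName == "" {
		// No policy requested: fall back to an empty policy object.
		return &apis.CStorVolumePolicy{}, nil
	}
	klog.Infof("using cstorvolume policy %q for volume configuration", policyName)
	return c.clientset.OpenebsV1alpha1().
		CStorVolumePolicies(getNamespace()).
		Get(policyName, metav1.GetOptions{})
}

createVolumeOperation would then start with a single call such as volumePolicy, err := c.getVolumePolicy(cvc), and later provisioning steps could reuse the same helper.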
@@ -52,6 +52,14 @@ type outbound struct {
URL string
}
+func (o outbound) Start() error {
+ return nil // nothing to do
+}
+
+func (o outbound) Stop() error {
+ return nil // nothing to do
+}
+
func (o outbound) Call(ctx context.Context, req *transport.Request) (*transport.Response, error) {
start := time.Now()
deadline, _ := ctx.Deadline() | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/yarpc/yarpc-go/internal/baggage"
"github.com/yarpc/yarpc-go/internal/errors"
"github.com/yarpc/yarpc-go/transport"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
// NewOutbound builds a new HTTP outbound that sends requests to the given
// URL.
func NewOutbound(url string) transport.Outbound {
return NewOutboundWithClient(url, nil)
}
// NewOutboundWithClient builds a new HTTP outbound that sends requests to the
// given URL using the given HTTP client.
func NewOutboundWithClient(url string, client *http.Client) transport.Outbound {
return outbound{Client: client, URL: url}
}
type outbound struct {
Client *http.Client
URL string
}
func (o outbound) Call(ctx context.Context, req *transport.Request) (*transport.Response, error) {
start := time.Now()
deadline, _ := ctx.Deadline()
ttl := deadline.Sub(start)
request, err := http.NewRequest("POST", o.URL, req.Body)
if err != nil {
return nil, err
}
request.Header = applicationHeaders.ToHTTPHeaders(req.Headers, nil)
if hs := baggage.FromContext(ctx); hs.Len() > 0 {
request.Header = baggageHeaders.ToHTTPHeaders(hs, request.Header)
}
request.Header.Set(CallerHeader, req.Caller)
request.Header.Set(ServiceHeader, req.Service)
request.Header.Set(ProcedureHeader, req.Procedure)
request.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
encoding := string(req.Encoding)
if encoding != "" {
request.Header.Set(EncodingHeader, encoding)
}
response, err := ctxhttp.Do(ctx, o.Client, request)
if err != nil {
if err == context.DeadlineExceeded {
return nil, errors.NewTimeoutError(req.Service, req.Procedure, deadline.Sub(start))
}
return nil, err
}
if response.StatusCode >= 200 && response.StatusCode < 300 {
appHeaders := applicationHeaders.FromHTTPHeaders(
response.Header, transport.NewHeaders())
return &transport.Response{
Headers: appHeaders,
Body: response.Body,
}, nil
}
// TODO Behavior for 300-range status codes is undefined
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
if err := response.Body.Close(); err != nil {
return nil, err
}
// Trim the trailing newline from HTTP error messages
message := strings.TrimSuffix(string(contents), "\n")
if response.StatusCode >= 400 && response.StatusCode < 500 {
return nil, errors.RemoteBadRequestError(message)
}
return nil, errors.RemoteUnexpectedError(message)
}
| 1 | 10,148 | optional: while there's nothing to do, a good way to catch bugs (where we use an outbound without calling `Start`) might be to have this outbound verify that `Start` is called before `Call` or `Stop` | yarpc-yarpc-go | go |
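A rough illustration of this optional suggestion, not the actual yarpc implementation: give the outbound a started flag (which means switching to pointer receivers) and fail fast when Call or Stop is used before Start. The field and message text below are made up, and sync would need to be added to the imports.

type outbound struct {
	Client  *http.Client
	URL     string
	mu      sync.Mutex
	started bool
}

func (o *outbound) Start() error {
	// Record that Start ran; there is still nothing else to do.
	o.mu.Lock()
	o.started = true
	o.mu.Unlock()
	return nil
}

func (o *outbound) ensureStarted(op string) error {
	o.mu.Lock()
	defer o.mu.Unlock()
	if !o.started {
		return fmt.Errorf("http outbound: %s called before Start()", op)
	}
	return nil
}

func (o *outbound) Call(ctx context.Context, req *transport.Request) (*transport.Response, error) {
	if err := o.ensureStarted("Call"); err != nil {
		return nil, err
	}
	// ... existing Call body unchanged ...
}

Stop would perform the same check before clearing the flag, so misuse surfaces as a clear error instead of a subtle bug.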
@@ -20,6 +20,9 @@ import (
"flag"
"os"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/cluster"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/machine"
clusterapis "sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
clusterapis "sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
capicluster "sigs.k8s.io/cluster-api/pkg/controller/cluster"
capimachine "sigs.k8s.io/cluster-api/pkg/controller/machine"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/cluster"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/machine"
)
func main() {
cfg := config.GetConfigOrDie()
flag.Parse()
log := logf.Log.WithName("aws-controller-manager")
logf.SetLogger(logf.ZapLogger(false))
entryLog := log.WithName("entrypoint")
// Setup a Manager
mgr, err := manager.New(cfg, manager.Options{})
if err != nil {
entryLog.Error(err, "unable to set up overall controller manager")
os.Exit(1)
}
cs, err := clientset.NewForConfig(cfg)
if err != nil {
panic(err)
}
clusterActuator, _ := cluster.NewActuator(cluster.ActuatorParams{
ClustersGetter: cs.ClusterV1alpha1(),
})
machineActuator, _ := machine.NewActuator(machine.ActuatorParams{
MachinesGetter: cs.ClusterV1alpha1(),
})
// Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator)
common.RegisterClusterProvisioner("aws", clusterActuator)
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
if err := clusterapis.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
capimachine.AddWithActuator(mgr, machineActuator)
capicluster.AddWithActuator(mgr, clusterActuator)
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
entryLog.Error(err, "unable to run manager")
os.Exit(1)
}
}
| 1 | 6,856 | Not a blocker by any means, but I think there is value in keeping the local imports in a separate group. | kubernetes-sigs-cluster-api-provider-aws | go |
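For reference, a sketch of the grouping the reviewer has in mind: the same imports as in the file above, with the provider-local packages kept in their own block. This is purely a reordering, no behavior change.

import (
	"flag"
	"os"

	clusterapis "sigs.k8s.io/cluster-api/pkg/apis"
	"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
	"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
	capicluster "sigs.k8s.io/cluster-api/pkg/controller/cluster"
	capimachine "sigs.k8s.io/cluster-api/pkg/controller/machine"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"

	"sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/cluster"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/machine"
)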
@@ -295,3 +295,14 @@ func TestStartStopFailures(t *testing.T) {
}
}
}
+
+func TestNoOutboundsForService(t *testing.T) {
+ assert.Panics(t, func() {
+ NewDispatcher(Config{
+ Name: "test",
+ Outbounds: Outbounds{
+ "my-test-service": {},
+ },
+ })
+ })
+} | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc_test
import (
"errors"
"fmt"
"testing"
. "go.uber.org/yarpc"
"go.uber.org/yarpc/transport"
"go.uber.org/yarpc/transport/http"
tch "go.uber.org/yarpc/transport/tchannel"
"go.uber.org/yarpc/transport/transporttest"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber/tchannel-go"
)
func basicDispatcher(t *testing.T) Dispatcher {
ch, err := tchannel.NewChannel("test", nil)
require.NoError(t, err, "failed to create TChannel")
return NewDispatcher(Config{
Name: "test",
Inbounds: Inbounds{
tch.NewInbound(ch).WithListenAddr(":0"),
http.NewInbound(":0"),
},
})
}
func TestInboundsReturnsACopy(t *testing.T) {
dispatcher := basicDispatcher(t)
inbounds := dispatcher.Inbounds()
require.Len(t, inbounds, 2, "expected two inbounds")
assert.NotNil(t, inbounds[0], "must not be nil")
assert.NotNil(t, inbounds[1], "must not be nil")
// Mutate the list and verify that the next call still returns non-nil
// results.
inbounds[0] = nil
inbounds[1] = nil
inbounds = dispatcher.Inbounds()
require.Len(t, inbounds, 2, "expected two inbounds")
assert.NotNil(t, inbounds[0], "must not be nil")
assert.NotNil(t, inbounds[1], "must not be nil")
}
func TestInboundsOrderIsMaintained(t *testing.T) {
dispatcher := basicDispatcher(t)
// Order must be maintained
_, ok := dispatcher.Inbounds()[0].(*tch.Inbound)
assert.True(t, ok, "first inbound must be TChannel")
_, ok = dispatcher.Inbounds()[1].(*http.Inbound)
assert.True(t, ok, "second inbound must be HTTP")
}
func TestInboundsOrderAfterStart(t *testing.T) {
dispatcher := basicDispatcher(t)
require.NoError(t, dispatcher.Start(), "failed to start Dispatcher")
defer dispatcher.Stop()
inbounds := dispatcher.Inbounds()
tchInbound := inbounds[0].(*tch.Inbound)
assert.NotEqual(t, "0.0.0.0:0", tchInbound.Channel().PeerInfo().HostPort)
httpInbound := inbounds[1].(*http.Inbound)
assert.NotNil(t, httpInbound.Addr(), "expected an HTTP addr")
}
func TestStartStopFailures(t *testing.T) {
tests := []struct {
desc string
inbounds func(*gomock.Controller) Inbounds
outbounds func(*gomock.Controller) Outbounds
wantStartErr string
wantStopErr string
}{
{
desc: "all success",
inbounds: func(mockCtrl *gomock.Controller) Inbounds {
inbounds := make(Inbounds, 10)
for i := range inbounds {
in := transporttest.NewMockInbound(mockCtrl)
in.EXPECT().SetRegistry(gomock.Any())
in.EXPECT().Start().Return(nil)
in.EXPECT().Stop().Return(nil)
inbounds[i] = in
}
return inbounds
},
outbounds: func(mockCtrl *gomock.Controller) Outbounds {
outbounds := make(Outbounds, 10)
for i := 0; i < 10; i++ {
out := transporttest.NewMockUnaryOutbound(mockCtrl)
out.EXPECT().Start().Return(nil)
out.EXPECT().Stop().Return(nil)
outbounds[fmt.Sprintf("service-%v", i)] =
transport.Outbounds{
Unary: out,
}
}
return outbounds
},
},
{
desc: "inbound 6 start failure",
inbounds: func(mockCtrl *gomock.Controller) Inbounds {
inbounds := make(Inbounds, 10)
for i := range inbounds {
in := transporttest.NewMockInbound(mockCtrl)
in.EXPECT().SetRegistry(gomock.Any())
if i == 6 {
in.EXPECT().Start().Return(errors.New("great sadness"))
} else {
in.EXPECT().Start().Return(nil)
in.EXPECT().Stop().Return(nil)
}
inbounds[i] = in
}
return inbounds
},
outbounds: func(mockCtrl *gomock.Controller) Outbounds {
outbounds := make(Outbounds, 10)
for i := 0; i < 10; i++ {
out := transporttest.NewMockUnaryOutbound(mockCtrl)
out.EXPECT().Start().Return(nil)
out.EXPECT().Stop().Return(nil)
outbounds[fmt.Sprintf("service-%v", i)] =
transport.Outbounds{
Unary: out,
}
}
return outbounds
},
wantStartErr: "great sadness",
},
{
desc: "inbound 7 stop failure",
inbounds: func(mockCtrl *gomock.Controller) Inbounds {
inbounds := make(Inbounds, 10)
for i := range inbounds {
in := transporttest.NewMockInbound(mockCtrl)
in.EXPECT().SetRegistry(gomock.Any())
in.EXPECT().Start().Return(nil)
if i == 7 {
in.EXPECT().Stop().Return(errors.New("great sadness"))
} else {
in.EXPECT().Stop().Return(nil)
}
inbounds[i] = in
}
return inbounds
},
outbounds: func(mockCtrl *gomock.Controller) Outbounds {
outbounds := make(Outbounds, 10)
for i := 0; i < 10; i++ {
out := transporttest.NewMockUnaryOutbound(mockCtrl)
out.EXPECT().Start().Return(nil)
out.EXPECT().Stop().Return(nil)
outbounds[fmt.Sprintf("service-%v", i)] =
transport.Outbounds{
Unary: out,
}
}
return outbounds
},
wantStopErr: "great sadness",
},
{
desc: "outbound 5 start failure",
inbounds: func(mockCtrl *gomock.Controller) Inbounds {
inbounds := make(Inbounds, 10)
for i := range inbounds {
in := transporttest.NewMockInbound(mockCtrl)
in.EXPECT().SetRegistry(gomock.Any())
in.EXPECT().Start().Return(nil)
in.EXPECT().Stop().Return(nil)
inbounds[i] = in
}
return inbounds
},
outbounds: func(mockCtrl *gomock.Controller) Outbounds {
outbounds := make(Outbounds, 10)
for i := 0; i < 10; i++ {
out := transporttest.NewMockUnaryOutbound(mockCtrl)
if i == 5 {
out.EXPECT().Start().Return(errors.New("something went wrong"))
} else {
out.EXPECT().Start().Return(nil)
out.EXPECT().Stop().Return(nil)
}
outbounds[fmt.Sprintf("service-%v", i)] =
transport.Outbounds{
Unary: out,
}
}
return outbounds
},
wantStartErr: "something went wrong",
// TODO: Include the name of the outbound in the error message
},
{
desc: "inbound 7 stop failure",
inbounds: func(mockCtrl *gomock.Controller) Inbounds {
inbounds := make(Inbounds, 10)
for i := range inbounds {
in := transporttest.NewMockInbound(mockCtrl)
in.EXPECT().SetRegistry(gomock.Any())
in.EXPECT().Start().Return(nil)
in.EXPECT().Stop().Return(nil)
inbounds[i] = in
}
return inbounds
},
outbounds: func(mockCtrl *gomock.Controller) Outbounds {
outbounds := make(Outbounds, 10)
for i := 0; i < 10; i++ {
out := transporttest.NewMockUnaryOutbound(mockCtrl)
out.EXPECT().Start().Return(nil)
if i == 7 {
out.EXPECT().Stop().Return(errors.New("something went wrong"))
} else {
out.EXPECT().Stop().Return(nil)
}
outbounds[fmt.Sprintf("service-%v", i)] =
transport.Outbounds{
Unary: out,
}
}
return outbounds
},
wantStopErr: "something went wrong",
// TODO: Include the name of the outbound in the error message
},
}
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
for _, tt := range tests {
dispatcher := NewDispatcher(Config{
Name: "test",
Inbounds: tt.inbounds(mockCtrl),
Outbounds: tt.outbounds(mockCtrl),
})
err := dispatcher.Start()
if tt.wantStartErr != "" {
if assert.Error(t, err, "%v: expected Start() to fail", tt.desc) {
assert.Contains(t, err.Error(), tt.wantStartErr, tt.desc)
}
continue
}
if !assert.NoError(t, err, "%v: expected Start() to succeed", tt.desc) {
continue
}
err = dispatcher.Stop()
if tt.wantStopErr == "" {
assert.NoError(t, err, "%v: expected Stop() to succeed", tt.desc)
continue
}
if assert.Error(t, err, "%v: expected Stop() to fail", tt.desc) {
assert.Contains(t, err.Error(), tt.wantStopErr, tt.desc)
}
}
}
| 1 | 11,639 | nit - I would test the error message as well. For panic, you might need to see if the stack contains the error message, instead of equaling. | yarpc-yarpc-go | go |
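A minimal sketch of what the reviewer suggests, assuming testify-style assertions; the assertPanicContains helper below is illustrative and not part of the yarpc codebase. The idea is to compare error messages with a substring check and, for panics, recover the value and check that its printed form contains the expected message instead of requiring equality.

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// assertPanicContains runs fn, expects it to panic, and asserts that the
// panic value's string form contains want rather than equaling it exactly.
func assertPanicContains(t *testing.T, want string, fn func()) {
	defer func() {
		r := recover()
		if assert.NotNil(t, r, "expected a panic") {
			assert.Contains(t, fmt.Sprint(r), want)
		}
	}()
	fn()
}

A test would wrap the code expected to panic, e.g. assertPanicContains(t, "great sadness", func() { /* call under test */ }).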
@@ -162,6 +162,18 @@ func substitute(s reflect.Value, replacer *strings.Replacer) {
case *compute.Client, *storage.Client, context.Context, context.CancelFunc:
// We specifically do not want to change fields with these types.
continue
+ case *WaitForInstancesStopped:
+ var newSlice WaitForInstancesStopped
+ for _, v := range *raw.(*WaitForInstancesStopped) {
+ newSlice = append(newSlice, replacer.Replace(v))
+ }
+ f.Set(reflect.ValueOf(&newSlice))
+ case *WaitForInstancesSignal:
+ var newSlice WaitForInstancesSignal
+ for _, v := range *raw.(*WaitForInstancesSignal) {
+ newSlice = append(newSlice, replacer.Replace(v))
+ }
+ f.Set(reflect.ValueOf(&newSlice))
default:
if f.Kind() != reflect.Ptr {
continue | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"context"
"fmt"
"math/rand"
"reflect"
"regexp"
"strings"
"time"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
)
var (
bucket = `([a-z0-9][-_.a-z0-9]*)`
object = `(.+)`
// Many of the Google Storage URLs are supported below.
// It is preferred that customers specify their object using
// its gs://<bucket>/<object> URL.
bucketRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s$`, bucket))
gsRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/%s$`, bucket, object))
// Check for the Google Storage URLs:
// http://<bucket>.storage.googleapis.com/<object>
// https://<bucket>.storage.googleapis.com/<object>
gsHTTPRegex = regexp.MustCompile(fmt.Sprintf(`^http[s]?://%s\.storage\.googleapis\.com/%s$`, bucket, object))
// Check for the other possible Google Storage URLs:
// http://storage.googleapis.com/<bucket>/<object>
// https://storage.googleapis.com/<bucket>/<object>
//
// The following are deprecated but checked:
// http://commondatastorage.googleapis.com/<bucket>/<object>
// https://commondatastorage.googleapis.com/<bucket>/<object>
gsHTTPRegex2 = regexp.MustCompile(fmt.Sprintf(`^http[s]?://(?:commondata)?storage\.googleapis\.com/%s/%s$`, bucket, object))
)
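// containsString reports whether s is present in ss.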
func containsString(s string, ss []string) bool {
for _, x := range ss {
if s == x {
return true
}
}
return false
}
// filter creates a copy of ss, excluding any instances of s.
func filter(ss []string, s string) []string {
result := []string{}
for _, element := range ss {
if element != s {
result = append(result, element)
}
}
return result
}
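// isLink reports whether s contains a "/", i.e. it looks like a path or partial URL rather than a bare resource name.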
func isLink(s string) bool {
return strings.Contains(s, "/")
}
func randString(n int) string {
gen := rand.New(rand.NewSource(time.Now().UnixNano()))
letters := "abcdefghijklmnopqrstuvwxyz"
b := make([]byte, n)
for i := range b {
b[i] = letters[gen.Int63()%int64(len(letters))]
}
return string(b)
}
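// splitGCSPath returns the bucket and object names parsed from a gs:// path or
// Google Storage URL, or an error if p does not match any supported form.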
func splitGCSPath(p string) (string, string, error) {
for _, rgx := range []*regexp.Regexp{gsRegex, gsHTTPRegex, gsHTTPRegex2} {
matches := rgx.FindStringSubmatch(p)
if matches != nil {
return matches[1], matches[2], nil
}
}
matches := bucketRegex.FindStringSubmatch(p)
if matches != nil {
return matches[1], "", nil
}
return "", "", fmt.Errorf("%q is not a valid GCS path", p)
}
// substitute iterates through the public fields of the struct represented
// by s; if a field's type matches one of the known types, the provided
// replacer.Replace is run on all string values replacing the original value
// in the underlying struct.
// Exceptions: will not change Vars fields or Workflow fields of SubWorkflow types.
func substitute(s reflect.Value, replacer *strings.Replacer) {
if s.Kind() != reflect.Struct {
return
}
for i := 0; i < s.NumField(); i++ {
		// Skip the Vars field as that's where the replacer gets populated from.
if s.Type().Field(i).Name == "Vars" {
continue
}
f := s.Field(i)
// Don't attempt to modify private fields.
if !f.CanSet() {
continue
}
switch f.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
// A nil entry will cause additional reflect operations to panic.
if f.IsNil() {
continue
}
}
raw := f.Interface()
switch raw.(type) {
case string:
f.SetString(replacer.Replace(f.String()))
case []string:
var newSlice []string
for _, v := range raw.([]string) {
newSlice = append(newSlice, replacer.Replace(v))
}
f.Set(reflect.ValueOf(newSlice))
case map[string]string:
newMap := map[string]string{}
for k, v := range raw.(map[string]string) {
newMap[replacer.Replace(k)] = replacer.Replace(v)
}
f.Set(reflect.ValueOf(newMap))
case map[string][]string:
newMap := map[string][]string{}
for k, v := range raw.(map[string][]string) {
var newSlice []string
for _, sv := range v {
newSlice = append(newSlice, replacer.Replace(sv))
}
newMap[replacer.Replace(k)] = newSlice
}
f.Set(reflect.ValueOf(newMap))
case map[string]*Step:
newMap := map[string]*Step{}
for k, v := range raw.(map[string]*Step) {
substitute(reflect.ValueOf(v).Elem(), replacer)
newMap[replacer.Replace(k)] = v
}
f.Set(reflect.ValueOf(newMap))
case *compute.Client, *storage.Client, context.Context, context.CancelFunc:
// We specifically do not want to change fields with these types.
continue
default:
if f.Kind() != reflect.Ptr {
continue
}
switch e := f.Elem(); e.Kind() {
case reflect.Slice:
// Iterate through then run them back through substitute.
for i := 0; i < e.Len(); i++ {
substitute(e.Index(i), replacer)
}
case reflect.Struct:
// Run structs right back through substitute.
substitute(e, replacer)
}
}
}
}
func xor(x, y bool) bool {
return x != y
}
| 1 | 6,313 | Didn't we have specific logic for handling slices vs structs? | GoogleCloudPlatform-compute-image-tools | go |
@@ -4199,7 +4199,15 @@ void EntityList::QuestJournalledSayClose(Mob *sender, float dist, const char *mo
buf.WriteInt32(0); // location, client doesn't seem to do anything with this
buf.WriteInt32(0);
buf.WriteInt32(0);
- buf.WriteString(message);
+
+ // auto inject saylinks (say)
+ if (RuleB(Chat, AutoInjectSaylinksToSay)) {
+ std::string new_message = EQ::SayLinkEngine::InjectSaylinksIfNotExist(message);
+ buf.WriteString(new_message.c_str());
+ }
+ else {
+ buf.WriteString(message);
+ }
auto outapp = new EQApplicationPacket(OP_SpecialMesg, buf);
| 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2003 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "../common/global_define.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <iostream>
#ifdef _WINDOWS
#include <process.h>
#else
#include <pthread.h>
#include "../common/unix.h"
#endif
#include "../common/features.h"
#include "../common/guilds.h"
#include "entity.h"
#include "dynamic_zone.h"
#include "guild_mgr.h"
#include "petitions.h"
#include "quest_parser_collection.h"
#include "raids.h"
#include "string_ids.h"
#include "worldserver.h"
#include "water_map.h"
#include "npc_scale_manager.h"
#include "../common/say_link.h"
#ifdef _WINDOWS
#define snprintf _snprintf
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#endif
#ifdef BOTS
#include "bot.h"
#endif
extern Zone *zone;
extern volatile bool is_zone_loaded;
extern WorldServer worldserver;
extern uint32 numclients;
extern PetitionList petition_list;
extern char errorname[32];
Entity::Entity()
{
id = 0;
initial_id = 0;
spawn_timestamp = time(nullptr);
}
Entity::~Entity()
{
}
Client *Entity::CastToClient()
{
if (this == 0x00) {
LogError("CastToClient error (nullptr)");
return 0;
}
#ifdef _EQDEBUG
if (!IsClient()) {
LogError("CastToClient error (not client)");
return 0;
}
#endif
return static_cast<Client *>(this);
}
NPC *Entity::CastToNPC()
{
#ifdef _EQDEBUG
if (!IsNPC()) {
LogError("CastToNPC error (Not NPC)");
return 0;
}
#endif
return static_cast<NPC *>(this);
}
Mob *Entity::CastToMob()
{
#ifdef _EQDEBUG
if (!IsMob()) {
std::cout << "CastToMob error" << std::endl;
return 0;
}
#endif
return static_cast<Mob *>(this);
}
Merc *Entity::CastToMerc()
{
#ifdef _EQDEBUG
if (!IsMerc()) {
std::cout << "CastToMerc error" << std::endl;
return 0;
}
#endif
return static_cast<Merc *>(this);
}
Trap *Entity::CastToTrap()
{
#ifdef DEBUG
if (!IsTrap()) {
return 0;
}
#endif
return static_cast<Trap *>(this);
}
Corpse *Entity::CastToCorpse()
{
#ifdef _EQDEBUG
if (!IsCorpse()) {
std::cout << "CastToCorpse error" << std::endl;
return 0;
}
#endif
return static_cast<Corpse *>(this);
}
Object *Entity::CastToObject()
{
#ifdef _EQDEBUG
if (!IsObject()) {
std::cout << "CastToObject error" << std::endl;
return 0;
}
#endif
return static_cast<Object *>(this);
}
/*Group* Entity::CastToGroup() {
#ifdef _EQDEBUG
if(!IsGroup()) {
std::cout << "CastToGroup error" << std::endl;
return 0;
}
#endif
return static_cast<Group*>(this);
}*/
Doors *Entity::CastToDoors()
{
return static_cast<Doors *>(this);
}
Beacon *Entity::CastToBeacon()
{
return static_cast<Beacon *>(this);
}
Encounter *Entity::CastToEncounter()
{
return static_cast<Encounter *>(this);
}
const Client *Entity::CastToClient() const
{
if (this == 0x00) {
std::cout << "CastToClient error (nullptr)" << std::endl;
return 0;
}
#ifdef _EQDEBUG
if (!IsClient()) {
std::cout << "CastToClient error (not client?)" << std::endl;
return 0;
}
#endif
return static_cast<const Client *>(this);
}
const NPC *Entity::CastToNPC() const
{
#ifdef _EQDEBUG
if (!IsNPC()) {
std::cout << "CastToNPC error" << std::endl;
return 0;
}
#endif
return static_cast<const NPC *>(this);
}
const Mob *Entity::CastToMob() const
{
#ifdef _EQDEBUG
if (!IsMob()) {
std::cout << "CastToMob error" << std::endl;
return 0;
}
#endif
return static_cast<const Mob *>(this);
}
const Merc *Entity::CastToMerc() const
{
#ifdef _EQDEBUG
if (!IsMerc()) {
std::cout << "CastToMerc error" << std::endl;
return 0;
}
#endif
return static_cast<const Merc *>(this);
}
const Trap *Entity::CastToTrap() const
{
#ifdef DEBUG
if (!IsTrap()) {
return 0;
}
#endif
return static_cast<const Trap *>(this);
}
const Corpse *Entity::CastToCorpse() const
{
#ifdef _EQDEBUG
if (!IsCorpse()) {
std::cout << "CastToCorpse error" << std::endl;
return 0;
}
#endif
return static_cast<const Corpse *>(this);
}
const Object *Entity::CastToObject() const
{
#ifdef _EQDEBUG
if (!IsObject()) {
std::cout << "CastToObject error" << std::endl;
return 0;
}
#endif
return static_cast<const Object *>(this);
}
const Doors *Entity::CastToDoors() const
{
return static_cast<const Doors *>(this);
}
const Beacon* Entity::CastToBeacon() const
{
return static_cast<const Beacon *>(this);
}
const Encounter* Entity::CastToEncounter() const
{
return static_cast<const Encounter *>(this);
}
#ifdef BOTS
Bot *Entity::CastToBot()
{
#ifdef _EQDEBUG
if (!IsBot()) {
std::cout << "CastToBot error" << std::endl;
return 0;
}
#endif
return static_cast<Bot *>(this);
}
const Bot *Entity::CastToBot() const
{
#ifdef _EQDEBUG
if (!IsBot()) {
std::cout << "CastToBot error" << std::endl;
return 0;
}
#endif
return static_cast<const Bot *>(this);
}
#endif
EntityList::EntityList()
:
object_timer(5000),
door_timer(5000),
corpse_timer(2000),
group_timer(1000),
raid_timer(1000),
trap_timer(1000)
{
// set up ids between 1 and 1500
// neither client or server performs well if you have
// enough entities to exhaust this list
for (uint16 i = 1; i <= 1500; i++)
free_ids.push(i);
}
EntityList::~EntityList()
{
//must call this before the list is destroyed, or else it will try to
//delete the NPCs in the list, which it cannot do.
RemoveAllLocalities();
}
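// Returns true only while at most two NPCs already have this mob on their hate
// list; used to cap how many NPCs can hold aggro on the same target at once.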
bool EntityList::CanAddHateForMob(Mob *p)
{
int count = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC *npc = it->second;
if (npc->IsOnHatelist(p))
count++;
// no need to continue if we already hit the limit
if (count > 3)
return false;
++it;
}
if (count <= 2)
return true;
return false;
}
void EntityList::AddClient(Client *client)
{
client->SetID(GetFreeID());
client_list.insert(std::pair<uint16, Client *>(client->GetID(), client));
mob_list.insert(std::pair<uint16, Mob *>(client->GetID(), client));
}
void EntityList::TrapProcess()
{
if (numclients < 1)
return;
if (trap_list.empty()) {
trap_timer.Disable();
return;
}
auto it = trap_list.begin();
while (it != trap_list.end()) {
if (!it->second->Process()) {
safe_delete(it->second);
free_ids.push(it->first);
it = trap_list.erase(it);
} else {
++it;
}
}
}
// Debug function -- checks to see if group_list has any nullptr entries.
// Meant to be called after each group-related function, in order
// to track down bugs.
void EntityList::CheckGroupList (const char *fname, const int fline)
{
std::list<Group *>::iterator it;
for (it = group_list.begin(); it != group_list.end(); ++it)
{
if (*it == nullptr)
{
LogError("nullptr group, [{}]:[{}]", fname, fline);
}
}
}
void EntityList::GroupProcess()
{
if (numclients < 1)
return;
if (group_list.empty()) {
group_timer.Disable();
return;
}
for (auto &group : group_list)
group->Process();
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
}
void EntityList::QueueToGroupsForNPCHealthAA(Mob *sender, const EQApplicationPacket *app)
{
for (auto &group : group_list)
group->QueueHPPacketsForNPCHealthAA(sender, app);
}
void EntityList::RaidProcess()
{
if (numclients < 1)
return;
if (raid_list.empty()) {
raid_timer.Disable();
return;
}
for (auto &raid : raid_list)
raid->Process();
}
void EntityList::DoorProcess()
{
#ifdef IDLE_WHEN_EMPTY
if (numclients < 1)
return;
#endif
if (door_list.empty()) {
door_timer.Disable();
return;
}
auto it = door_list.begin();
	while (it != door_list.end()) {
		if (!it->second->Process()) {
			safe_delete(it->second);
			free_ids.push(it->first);
			it = door_list.erase(it);
		} else {
			// erase() already advanced the iterator; only step forward when nothing was removed
			++it;
		}
	}
}
void EntityList::ObjectProcess()
{
if (object_list.empty()) {
object_timer.Disable();
return;
}
auto it = object_list.begin();
while (it != object_list.end()) {
if (!it->second->Process()) {
safe_delete(it->second);
free_ids.push(it->first);
it = object_list.erase(it);
} else {
++it;
}
}
}
void EntityList::CorpseProcess()
{
if (corpse_list.empty()) {
corpse_timer.Disable(); // No corpses in list
return;
}
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (!it->second->Process()) {
safe_delete(it->second);
free_ids.push(it->first);
it = corpse_list.erase(it);
} else {
++it;
}
}
}
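// Per-tick processing for every mob in the zone. With IDLE_WHEN_EMPTY, full processing
// only runs while clients are present, a quest keeps the zone active, the spawn point is
// flagged to path while idle, or shortly after the zone empties; dead entities are then
// removed from the relevant entity lists.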
void EntityList::MobProcess()
{
bool mob_dead;
auto it = mob_list.begin();
while (it != mob_list.end()) {
uint16 id = it->first;
Mob *mob = it->second;
size_t sz = mob_list.size();
#ifdef IDLE_WHEN_EMPTY
static int old_client_count=0;
static Timer *mob_settle_timer = new Timer();
if (numclients == 0 && old_client_count > 0 &&
RuleI(Zone, SecondsBeforeIdle) > 0) {
// Start Timer to allow any mobs that chased chars from zone
// to return home.
mob_settle_timer->Start(RuleI(Zone, SecondsBeforeIdle) * 1000);
}
old_client_count = numclients;
// Disable settle timer if someone zones into empty zone
if (numclients > 0 || mob_settle_timer->Check()) {
mob_settle_timer->Disable();
}
Spawn2* s2 = mob->CastToNPC()->respawn2;
// Perform normal mob processing if any of these are true:
// -- zone is not empty
// -- a quest has turned it on for this zone while zone is idle
// -- the entity's spawn2 point is marked as path_while_zone_idle
// -- the zone is newly empty and we're allowing mobs to settle
if (zone->process_mobs_while_empty || numclients > 0 ||
(s2 && s2->PathWhenZoneIdle()) || mob_settle_timer->Enabled()) {
mob_dead = !mob->Process();
}
else {
// spawn_events can cause spawns and deaths while zone empty.
// At the very least, process that.
mob_dead = mob->CastToNPC()->GetDepop();
}
#else
mob_dead = !mob->Process();
#endif
size_t a_sz = mob_list.size();
if(a_sz > sz) {
//increased size can potentially screw with iterators so reset it to current value
			//if buckets are re-ordered we may skip a process here and there but since
//process happens so often it shouldn't matter much
it = mob_list.find(id);
++it;
} else {
++it;
}
if(mob_dead) {
if(mob->IsMerc()) {
entity_list.RemoveMerc(id);
}
#ifdef BOTS
else if(mob->IsBot()) {
entity_list.RemoveBot(id);
}
#endif
else if(mob->IsNPC()) {
entity_list.RemoveNPC(id);
}
else {
#ifdef _WINDOWS
struct in_addr in;
in.s_addr = mob->CastToClient()->GetIP();
LogInfo("Dropping client: Process=false, ip=[{}] port=[{}]", inet_ntoa(in), mob->CastToClient()->GetPort());
#endif
zone->StartShutdownTimer();
Group *g = GetGroupByMob(mob);
if(g) {
LogError("About to delete a client still in a group");
g->DelMember(mob);
}
Raid *r = entity_list.GetRaidByClient(mob->CastToClient());
if(r) {
LogError("About to delete a client still in a raid");
r->MemberZoned(mob->CastToClient());
}
entity_list.RemoveClient(id);
}
entity_list.RemoveMob(id);
}
}
}
void EntityList::BeaconProcess()
{
auto it = beacon_list.begin();
while (it != beacon_list.end()) {
if (!it->second->Process()) {
safe_delete(it->second);
free_ids.push(it->first);
it = beacon_list.erase(it);
} else {
++it;
}
}
}
void EntityList::EncounterProcess()
{
auto it = encounter_list.begin();
while (it != encounter_list.end()) {
if (!it->second->Process()) {
// if Process is returning false here, we probably just got called from ReloadQuests .. oh well
parse->RemoveEncounter(it->second->GetEncounterName());
safe_delete(it->second);
free_ids.push(it->first);
it = encounter_list.erase(it);
}
else {
++it;
}
}
}
void EntityList::AddGroup(Group *group)
{
if (group == nullptr) //this seems to be happening somehow...
return;
uint32 gid = worldserver.NextGroupID();
if (gid == 0) {
LogError("Unable to get new group ID from world server. group is going to be broken");
return;
}
AddGroup(group, gid);
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
}
void EntityList::AddGroup(Group *group, uint32 gid)
{
group->SetID(gid);
group_list.push_back(group);
if (!group_timer.Enabled())
group_timer.Start();
#if EQDEBUG >= 5
CheckGroupList(__FILE__, __LINE__);
#endif
}
void EntityList::AddRaid(Raid *raid)
{
if (raid == nullptr)
return;
uint32 gid = worldserver.NextGroupID();
if (gid == 0) {
LogError("Unable to get new group ID from world server. group is going to be broken");
return;
}
AddRaid(raid, gid);
}
void EntityList::AddRaid(Raid *raid, uint32 gid)
{
raid->SetID(gid);
raid_list.push_back(raid);
if (!raid_timer.Enabled())
raid_timer.Start();
}
void EntityList::AddCorpse(Corpse *corpse, uint32 in_id)
{
if (corpse == 0)
return;
if (in_id == 0xFFFFFFFF)
corpse->SetID(GetFreeID());
else
corpse->SetID(in_id);
corpse->CalcCorpseName();
corpse_list.insert(std::pair<uint16, Corpse *>(corpse->GetID(), corpse));
if (!corpse_timer.Enabled())
corpse_timer.Start();
}
void EntityList::AddNPC(NPC *npc, bool SendSpawnPacket, bool dontqueue)
{
npc->SetID(GetFreeID());
parse->EventNPC(EVENT_SPAWN, npc, nullptr, "", 0);
uint16 emoteid = npc->GetEmoteID();
if (emoteid != 0)
npc->DoNPCEmote(ONSPAWN, emoteid);
npc->SetSpawned();
if (SendSpawnPacket) {
if (dontqueue) { // aka, SEND IT NOW BITCH!
auto app = new EQApplicationPacket;
npc->CreateSpawnPacket(app, npc);
QueueClients(npc, app);
npc->SendArmorAppearance();
npc->SetAppearance(npc->GetGuardPointAnim(),false);
if (!npc->IsTargetable())
npc->SendTargetable(false);
safe_delete(app);
} else {
auto ns = new NewSpawn_Struct;
memset(ns, 0, sizeof(NewSpawn_Struct));
npc->FillSpawnStruct(ns, nullptr); // Not working on player newspawns, so it's safe to use a ForWho of 0
AddToSpawnQueue(npc->GetID(), &ns);
safe_delete(ns);
}
if (npc->IsFindable())
UpdateFindableNPCState(npc, false);
}
npc_list.insert(std::pair<uint16, NPC *>(npc->GetID(), npc));
mob_list.insert(std::pair<uint16, Mob *>(npc->GetID(), npc));
entity_list.ScanCloseMobs(npc->close_mobs, npc, true);
/* Zone controller process EVENT_SPAWN_ZONE */
if (RuleB(Zone, UseZoneController)) {
if (entity_list.GetNPCByNPCTypeID(ZONE_CONTROLLER_NPC_ID) && npc->GetNPCTypeID() != ZONE_CONTROLLER_NPC_ID){
char data_pass[100] = { 0 };
snprintf(data_pass, 99, "%d %d", npc->GetID(), npc->GetNPCTypeID());
parse->EventNPC(EVENT_SPAWN_ZONE, entity_list.GetNPCByNPCTypeID(ZONE_CONTROLLER_NPC_ID)->CastToNPC(), nullptr, data_pass, 0);
}
}
/**
* Set whether NPC was spawned in or out of water
*/
if (zone->HasMap() && zone->HasWaterMap()) {
npc->SetSpawnedInWater(false);
if (zone->watermap->InLiquid(npc->GetPosition())) {
npc->SetSpawnedInWater(true);
}
}
}
void EntityList::AddMerc(Merc *merc, bool SendSpawnPacket, bool dontqueue)
{
if (merc)
{
merc->SetID(GetFreeID());
merc->SetSpawned();
if (SendSpawnPacket)
{
if (dontqueue) {
// Send immediately
auto outapp = new EQApplicationPacket();
merc->CreateSpawnPacket(outapp);
outapp->priority = 6;
QueueClients(merc, outapp, true);
safe_delete(outapp);
} else {
// Queue the packet
auto ns = new NewSpawn_Struct;
memset(ns, 0, sizeof(NewSpawn_Struct));
merc->FillSpawnStruct(ns, 0);
AddToSpawnQueue(merc->GetID(), &ns);
safe_delete(ns);
}
}
merc_list.insert(std::pair<uint16, Merc *>(merc->GetID(), merc));
mob_list.insert(std::pair<uint16, Mob *>(merc->GetID(), merc));
}
}
void EntityList::AddObject(Object *obj, bool SendSpawnPacket)
{
obj->SetID(GetFreeID());
if (SendSpawnPacket) {
EQApplicationPacket app;
obj->CreateSpawnPacket(&app);
#if (EQDEBUG >= 6)
DumpPacket(&app);
#endif
QueueClients(0, &app,false);
}
object_list.insert(std::pair<uint16, Object *>(obj->GetID(), obj));
if (!object_timer.Enabled())
object_timer.Start();
}
void EntityList::AddDoor(Doors *door)
{
door->SetEntityID(GetFreeID());
door_list.insert(std::pair<uint16, Doors *>(door->GetEntityID(), door));
if (!door_timer.Enabled())
door_timer.Start();
}
void EntityList::AddTrap(Trap *trap)
{
trap->SetID(GetFreeID());
trap_list.insert(std::pair<uint16, Trap *>(trap->GetID(), trap));
if (!trap_timer.Enabled())
trap_timer.Start();
}
void EntityList::AddBeacon(Beacon *beacon)
{
beacon->SetID(GetFreeID());
beacon_list.insert(std::pair<uint16, Beacon *>(beacon->GetID(), beacon));
}
void EntityList::AddEncounter(Encounter *encounter)
{
encounter->SetID(GetFreeID());
encounter_list.insert(std::pair<uint16, Encounter *>(encounter->GetID(), encounter));
}
void EntityList::AddToSpawnQueue(uint16 entityid, NewSpawn_Struct **ns)
{
uint32 count;
if ((count = (client_list.size())) == 0)
return;
SpawnQueue.Append(*ns);
NumSpawnsOnQueue++;
if (tsFirstSpawnOnQueue == 0xFFFFFFFF)
tsFirstSpawnOnQueue = Timer::GetCurrentTime();
*ns = nullptr;
}
void EntityList::CheckSpawnQueue()
{
// Send the stuff if the oldest packet on the queue is older than 50ms -Quagmire
if (tsFirstSpawnOnQueue != 0xFFFFFFFF && (Timer::GetCurrentTime() - tsFirstSpawnOnQueue) > 50) {
LinkedListIterator<NewSpawn_Struct *> iterator(SpawnQueue);
EQApplicationPacket *outapp = 0;
iterator.Reset();
NewSpawn_Struct *ns;
while(iterator.MoreElements()) {
outapp = new EQApplicationPacket;
ns = iterator.GetData();
Mob::CreateSpawnPacket(outapp, ns);
QueueClients(0, outapp);
auto it = npc_list.find(ns->spawn.spawnId);
if (it == npc_list.end()) {
				// We must have despawned, hope that's the reason!
LogError("Error in EntityList::CheckSpawnQueue: Unable to find NPC for spawnId [{}]", ns->spawn.spawnId);
}
else {
NPC *pnpc = it->second;
pnpc->SendArmorAppearance();
pnpc->SetAppearance(pnpc->GetGuardPointAnim(), false);
if (!pnpc->IsTargetable())
pnpc->SendTargetable(false);
}
safe_delete(outapp);
iterator.RemoveCurrent();
}
tsFirstSpawnOnQueue = 0xFFFFFFFF;
NumSpawnsOnQueue = 0;
}
}
Doors *EntityList::FindDoor(uint8 door_id)
{
if (door_id == 0 || door_list.empty())
return nullptr;
auto it = door_list.begin();
while (it != door_list.end()) {
if (it->second->GetDoorID() == door_id)
return it->second;
++it;
}
return nullptr;
}
Object *EntityList::FindObject(uint32 object_id)
{
if (object_id == 0 || object_list.empty())
return nullptr;
auto it = object_list.begin();
while (it != object_list.end()) {
if (it->second->GetDBID() == object_id)
return it->second;
++it;
}
return nullptr;
}
Object *EntityList::FindNearbyObject(float x, float y, float z, float radius)
{
if (object_list.empty())
return nullptr;
float ox;
float oy;
float oz;
auto it = object_list.begin();
while (it != object_list.end()) {
Object *object = it->second;
object->GetLocation(&ox, &oy, &oz);
ox = (x < ox) ? (ox - x) : (x - ox);
oy = (y < oy) ? (oy - y) : (y - oy);
oz = (z < oz) ? (oz - z) : (z - oz);
if ((ox <= radius) && (oy <= radius) && (oz <= radius))
return object;
++it;
}
return nullptr;
}
bool EntityList::MakeDoorSpawnPacket(EQApplicationPacket *app, Client *client)
{
if (door_list.empty())
return false;
uint32 mask_test = client->ClientVersionBit();
int count = 0;
auto it = door_list.begin();
while (it != door_list.end()) {
if ((it->second->GetClientVersionMask() & mask_test) &&
strlen(it->second->GetDoorName()) > 3)
count++;
++it;
}
if (count == 0 || count > 500)
return false;
uint32 length = count * sizeof(Door_Struct);
auto packet_buffer = new uchar[length];
memset(packet_buffer, 0, length);
uchar *ptr = packet_buffer;
Doors *door;
Door_Struct new_door;
it = door_list.begin();
while (it != door_list.end()) {
door = it->second;
if (door && (door->GetClientVersionMask() & mask_test) &&
strlen(door->GetDoorName()) > 3) {
memset(&new_door, 0, sizeof(new_door));
memcpy(new_door.name, door->GetDoorName(), 32);
auto position = door->GetPosition();
new_door.xPos = position.x;
new_door.yPos = position.y;
new_door.zPos = position.z;
new_door.heading = position.w;
new_door.incline = door->GetIncline();
new_door.size = door->GetSize();
new_door.doorId = door->GetDoorID();
new_door.opentype = door->GetOpenType();
Log(Logs::General, Logs::Doors, "Door timer_disable: %s door_id: %u is_open: %s invert_state: %i",
(door->GetDisableTimer() ? "true" : "false"),
door->GetDoorID(),
(door->IsDoorOpen() ? "true" : "false"),
door->GetInvertState()
);
new_door.state_at_spawn = (door->GetInvertState() ? !door->IsDoorOpen() : door->IsDoorOpen());
new_door.invert_state = door->GetInvertState();
new_door.door_param = door->GetDoorParam();
memcpy(ptr, &new_door, sizeof(new_door));
ptr += sizeof(new_door);
*(ptr - 1) = 0x01;
*(ptr - 3) = 0x01;
}
++it;
}
app->SetOpcode(OP_SpawnDoor);
app->size = length;
app->pBuffer = packet_buffer;
return true;
}
Entity *EntityList::GetEntityMob(uint16 id)
{
auto it = mob_list.find(id);
if (it != mob_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityMerc(uint16 id)
{
auto it = merc_list.find(id);
if (it != merc_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityMob(const char *name)
{
if (name == 0 || mob_list.empty())
return 0;
auto it = mob_list.begin();
while (it != mob_list.end()) {
if (strcasecmp(it->second->GetName(), name) == 0)
return it->second;
++it;
}
return nullptr;
}
Entity *EntityList::GetEntityDoor(uint16 id)
{
auto it = door_list.find(id);
if (it != door_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityCorpse(uint16 id)
{
auto it = corpse_list.find(id);
if (it != corpse_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityCorpse(const char *name)
{
if (name == 0 || corpse_list.empty())
return nullptr;
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (strcasecmp(it->second->GetName(), name) == 0)
return it->second;
++it;
}
return nullptr;
}
Entity *EntityList::GetEntityTrap(uint16 id)
{
auto it = trap_list.find(id);
if (it != trap_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityObject(uint16 id)
{
auto it = object_list.find(id);
if (it != object_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityBeacon(uint16 id)
{
auto it = beacon_list.find(id);
if (it != beacon_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetEntityEncounter(uint16 id)
{
auto it = encounter_list.find(id);
if (it != encounter_list.end())
return it->second;
return nullptr;
}
Entity *EntityList::GetID(uint16 get_id)
{
Entity *ent = 0;
if ((ent = entity_list.GetEntityMob(get_id)) != 0)
return ent;
else if ((ent=entity_list.GetEntityDoor(get_id)) != 0)
return ent;
else if ((ent=entity_list.GetEntityCorpse(get_id)) != 0)
return ent;
else if ((ent=entity_list.GetEntityObject(get_id)) != 0)
return ent;
else if ((ent=entity_list.GetEntityTrap(get_id)) != 0)
return ent;
else if ((ent=entity_list.GetEntityBeacon(get_id)) != 0)
return ent;
else if ((ent = entity_list.GetEntityEncounter(get_id)) != 0)
return ent;
else
return 0;
}
NPC *EntityList::GetNPCByNPCTypeID(uint32 npc_id)
{
if (npc_id == 0 || npc_list.empty())
return nullptr;
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->GetNPCTypeID() == npc_id)
return it->second;
++it;
}
return nullptr;
}
NPC *EntityList::GetNPCBySpawnID(uint32 spawn_id)
{
if (spawn_id == 0 || npc_list.empty()) {
return nullptr;
}
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->GetSpawnGroupId() == spawn_id) {
return it->second;
}
++it;
}
return nullptr;
}
Mob *EntityList::GetMob(uint16 get_id)
{
Entity *ent = nullptr;
if (get_id == 0)
return nullptr;
if ((ent = entity_list.GetEntityMob(get_id)))
return ent->CastToMob();
else if ((ent = entity_list.GetEntityCorpse(get_id)))
return ent->CastToMob();
return nullptr;
}
Mob *EntityList::GetMob(const char *name)
{
Entity* ent = nullptr;
if (name == 0)
return nullptr;
if ((ent = entity_list.GetEntityMob(name)))
return ent->CastToMob();
else if ((ent = entity_list.GetEntityCorpse(name)))
return ent->CastToMob();
return nullptr;
}
Mob *EntityList::GetMobByNpcTypeID(uint32 get_id)
{
if (get_id == 0 || mob_list.empty())
return 0;
auto it = mob_list.begin();
while (it != mob_list.end()) {
if (it->second->GetNPCTypeID() == get_id)
return it->second;
++it;
}
return nullptr;
}
bool EntityList::IsMobSpawnedByNpcTypeID(uint32 get_id)
{
if (get_id == 0 || npc_list.empty())
return false;
auto it = npc_list.begin();
while (it != npc_list.end()) {
// Mobs will have a 0 as their GetID() if they're dead
if (it->second->GetNPCTypeID() == get_id && it->second->GetID() != 0)
return true;
++it;
}
return false;
}
Object *EntityList::GetObjectByDBID(uint32 id)
{
if (id == 0 || object_list.empty())
return nullptr;
auto it = object_list.begin();
while (it != object_list.end()) {
if (it->second->GetDBID() == id)
return it->second;
++it;
}
return nullptr;
}
Doors *EntityList::GetDoorsByDBID(uint32 id)
{
if (id == 0 || door_list.empty())
return nullptr;
auto it = door_list.begin();
while (it != door_list.end()) {
if (it->second->GetDoorDBID() == id)
return it->second;
++it;
}
return nullptr;
}
Doors *EntityList::GetDoorsByDoorID(uint32 id)
{
if (id == 0 || door_list.empty())
return nullptr;
auto it = door_list.begin();
while (it != door_list.end()) {
if (it->second->CastToDoors()->GetDoorID() == id)
return it->second;
++it;
}
return nullptr;
}
uint16 EntityList::GetFreeID()
{
if (free_ids.empty()) { // hopefully this will never be true
		// The client has a hard cap on entity count somewhere
		// Neither the client nor the server performs well with a lot of entities either
uint16 newid = 1500;
while (true) {
newid++;
if (GetID(newid) == nullptr)
return newid;
}
}
uint16 newid = free_ids.front();
free_ids.pop();
return newid;
}
// if no language skill is specified, sent with 100 skill
void EntityList::ChannelMessage(Mob *from, uint8 chan_num, uint8 language, const char *message, ...)
{
ChannelMessage(from, chan_num, language, 100, message);
}
void EntityList::ChannelMessage(Mob *from, uint8 chan_num, uint8 language,
uint8 lang_skill, const char *message, ...)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4096, message, argptr);
va_end(argptr);
auto it = client_list.begin();
while(it != client_list.end()) {
Client *client = it->second;
eqFilterType filter = FilterNone;
if (chan_num == ChatChannel_Shout) //shout
filter = FilterShouts;
else if (chan_num == ChatChannel_Auction) //auction
filter = FilterAuctions;
//
// Only say is limited in range
if (chan_num != ChatChannel_Say || Distance(client->GetPosition(), from->GetPosition()) < 200)
if (filter == FilterNone || client->GetFilter(filter) != FilterHide)
client->ChannelMessageSend(from->GetName(), 0, chan_num, language, lang_skill, buffer);
++it;
}
}
void EntityList::SendZoneSpawns(Client *client)
{
EQApplicationPacket *app;
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *ent = it->second;
if (!ent->InZone() || !ent->ShouldISpawnFor(client)) {
++it;
continue;
}
app = new EQApplicationPacket;
it->second->CastToMob()->CreateSpawnPacket(app); // TODO: Use zonespawns opcode instead
client->QueuePacket(app, true, Client::CLIENT_CONNECTED);
safe_delete(app);
++it;
}
}
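// Sends zone spawns using bulk spawn packets. Spawns farther than ~600 units, and
// illusioned player races the client cannot handle in a bulk packet, are instead sent
// individually as delayed packets once the client is connected.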
void EntityList::SendZoneSpawnsBulk(Client *client)
{
NewSpawn_Struct ns{};
Mob *spawn;
EQApplicationPacket *app;
uint32 max_spawns = 100;
if (max_spawns > mob_list.size()) {
max_spawns = static_cast<uint32>(mob_list.size());
}
auto bulk_zone_spawn_packet = new BulkZoneSpawnPacket(client, max_spawns);
const glm::vec4 &client_position = client->GetPosition();
const float distance_max = (600.0 * 600.0);
for (auto & it : mob_list) {
spawn = it.second;
if (spawn && spawn->GetID() > 0 && spawn->Spawned()) {
if (!spawn->ShouldISpawnFor(client)) {
continue;
}
const glm::vec4 &spawn_position = spawn->GetPosition();
bool is_delayed_packet = (
DistanceSquared(client_position, spawn_position) > distance_max ||
(spawn->IsClient() && (spawn->GetRace() == MINOR_ILL_OBJ || spawn->GetRace() == TREE))
);
if (is_delayed_packet) {
app = new EQApplicationPacket;
spawn->CreateSpawnPacket(app);
client->QueuePacket(app, true, Client::CLIENT_CONNECTED);
safe_delete(app);
}
else {
memset(&ns, 0, sizeof(NewSpawn_Struct));
spawn->FillSpawnStruct(&ns, client);
bulk_zone_spawn_packet->AddSpawn(&ns);
}
spawn->SendArmorAppearance(client);
/**
* Original code kept for spawn packet research
*
* int32 race = spawn->GetRace();
*
* Illusion races on PCs don't work as a mass spawn
* But they will work as an add_spawn AFTER CLIENT_CONNECTED.
* if (spawn->IsClient() && (race == MINOR_ILL_OBJ || race == TREE)) {
* app = new EQApplicationPacket;
* spawn->CreateSpawnPacket(app);
* client->QueuePacket(app, true, Client::CLIENT_CONNECTED);
* safe_delete(app);
* }
* else {
* memset(&ns, 0, sizeof(NewSpawn_Struct));
* spawn->FillSpawnStruct(&ns, client);
* bzsp->AddSpawn(&ns);
* }
*
* Despite being sent in the OP_ZoneSpawns packet, the client
* does not display worn armor correctly so display it.
* spawn->SendArmorAppearance(client);
*/
}
}
safe_delete(bulk_zone_spawn_packet);
}
//this is a hack to handle a broken spawn struct
void EntityList::SendZonePVPUpdates(Client *to)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
if(c->GetPVP())
c->SendAppearancePacket(AT_PVP, c->GetPVP(), true, false, to);
++it;
}
}
void EntityList::SendZoneCorpses(Client *client)
{
EQApplicationPacket *app;
for (auto it = corpse_list.begin(); it != corpse_list.end(); ++it) {
Corpse *ent = it->second;
app = new EQApplicationPacket;
ent->CreateSpawnPacket(app);
client->QueuePacket(app, true, Client::CLIENT_CONNECTED);
safe_delete(app);
}
}
void EntityList::SendZoneCorpsesBulk(Client *client)
{
NewSpawn_Struct ns;
Corpse *spawn;
uint32 maxspawns = 100;
auto bzsp = new BulkZoneSpawnPacket(client, maxspawns);
for (auto it = corpse_list.begin(); it != corpse_list.end(); ++it) {
spawn = it->second;
if (spawn && spawn->InZone()) {
memset(&ns, 0, sizeof(NewSpawn_Struct));
spawn->FillSpawnStruct(&ns, client);
bzsp->AddSpawn(&ns);
}
}
safe_delete(bzsp);
}
void EntityList::SendZoneObjects(Client *client)
{
auto it = object_list.begin();
while (it != object_list.end()) {
auto app = new EQApplicationPacket;
it->second->CreateSpawnPacket(app);
client->FastQueuePacket(&app);
++it;
}
}
void EntityList::Save()
{
auto it = client_list.begin();
while (it != client_list.end()) {
it->second->Save();
++it;
}
}
void EntityList::ReplaceWithTarget(Mob *pOldMob, Mob *pNewTarget)
{
if (!pNewTarget)
return;
auto it = mob_list.begin();
while (it != mob_list.end()) {
if (it->second->IsAIControlled()) {
// replace the old mob with the new one
if (it->second->RemoveFromHateList(pOldMob))
it->second->AddToHateList(pNewTarget, 1, 0);
}
++it;
}
}
void EntityList::RemoveFromTargets(Mob *mob, bool RemoveFromXTargets)
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *m = it->second;
++it;
if (!m)
continue;
if (RemoveFromXTargets) {
if (m->IsClient() && (mob->CheckAggro(m) || mob->IsOnFeignMemory(m->CastToClient())))
m->CastToClient()->RemoveXTarget(mob, false);
// FadingMemories calls this function passing the client.
else if (mob->IsClient() && (m->CheckAggro(mob) || m->IsOnFeignMemory(mob->CastToClient())))
mob->CastToClient()->RemoveXTarget(m, false);
}
m->RemoveFromHateList(mob);
}
}
void EntityList::RemoveFromXTargets(Mob *mob)
{
auto it = client_list.begin();
while (it != client_list.end()) {
it->second->RemoveXTarget(mob, false);
++it;
}
}
void EntityList::RemoveFromAutoXTargets(Mob *mob)
{
auto it = client_list.begin();
while (it != client_list.end()) {
it->second->RemoveXTarget(mob, true);
++it;
}
}
void EntityList::RefreshAutoXTargets(Client *c)
{
if (!c)
return;
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *m = it->second;
++it;
if (!m || m->GetHP() <= 0)
continue;
if ((m->CheckAggro(c) || m->IsOnFeignMemory(c)) && !c->IsXTarget(m)) {
c->AddAutoXTarget(m, false); // we only call this before a bulk, so lets not send right away
break;
}
}
}
void EntityList::RefreshClientXTargets(Client *c)
{
if (!c)
return;
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c2 = it->second;
++it;
if (!c2)
continue;
if (c2->IsClientXTarget(c))
c2->UpdateClientXTarget(c);
}
}
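// Queues the packet to clients that have the sender targeted (and, when HoTT is set,
// clients whose target's target is the sender). When inspect_buffs is set, only GMs,
// clients covered by the AlwaysSendTargetsBuffs rule, or group/raid members with the
// Inspect Buffs leadership AA receive it.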
void EntityList::QueueClientsByTarget(Mob *sender, const EQApplicationPacket *app,
bool iSendToSender, Mob *SkipThisMob, bool ackreq, bool HoTT, uint32 ClientVersionBits, bool inspect_buffs)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
++it;
Mob *Target = c->GetTarget();
if (!Target)
continue;
Mob *TargetsTarget = nullptr;
TargetsTarget = Target->GetTarget();
bool Send = false;
if (c == SkipThisMob)
continue;
if (iSendToSender)
if (c == sender)
Send = true;
if (c != sender) {
if (Target == sender) {
if (inspect_buffs) { // if inspect_buffs is true we're sending a mob's buffs to those with the LAA
if (c->GetGM() || RuleB(Spells, AlwaysSendTargetsBuffs)) {
Send = true;
} else if (c->IsRaidGrouped()) {
Raid *raid = c->GetRaid();
if (!raid)
continue;
uint32 gid = raid->GetGroup(c);
if (gid > 11 || raid->GroupCount(gid) < 3)
continue;
if (raid->GetLeadershipAA(groupAAInspectBuffs, gid))
Send = true;
} else {
Group *group = c->GetGroup();
if (!group || group->GroupCount() < 3)
continue;
if (group->GetLeadershipAA(groupAAInspectBuffs))
Send = true;
}
} else {
Send = true;
}
} else if (HoTT && TargetsTarget == sender) {
Send = true;
}
}
if (Send && (c->ClientVersionBit() & ClientVersionBits))
c->QueuePacket(app, ackreq);
}
}
void EntityList::QueueClientsByXTarget(Mob *sender, const EQApplicationPacket *app, bool iSendToSender, EQ::versions::ClientVersionBitmask client_version_bits)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
++it;
if (!c || ((c == sender) && !iSendToSender))
continue;
if ((c->ClientVersionBit() & client_version_bits) == 0)
continue;
if (!c->IsXTarget(sender))
continue;
c->QueuePacket(app);
}
}
/**
* @param sender
* @param app
* @param ignore_sender
* @param distance
* @param skipped_mob
* @param is_ack_required
* @param filter
*/
void EntityList::QueueCloseClients(
Mob *sender,
const EQApplicationPacket *app,
bool ignore_sender,
float distance,
Mob *skipped_mob,
bool is_ack_required,
eqFilterType filter
)
{
if (sender == nullptr) {
QueueClients(sender, app, ignore_sender);
return;
}
if (distance <= 0) {
distance = 600;
}
float distance_squared = distance * distance;
for (auto &e : GetCloseMobList(sender, distance)) {
Mob *mob = e.second;
if (!mob->IsClient()) {
continue;
}
Client *client = mob->CastToClient();
if ((!ignore_sender || client != sender) && (client != skipped_mob)) {
if (DistanceSquared(client->GetPosition(), sender->GetPosition()) >= distance_squared) {
continue;
}
if (!client->Connected()) {
continue;
}
eqFilterMode client_filter = client->GetFilter(filter);
if (
filter == FilterNone || client_filter == FilterShow ||
(client_filter == FilterShowGroupOnly &&
(sender == client || (client->GetGroup() && client->GetGroup()->IsGroupMember(sender)))) ||
(client_filter == FilterShowSelfOnly && client == sender)
) {
client->QueuePacket(app, is_ack_required, Client::CLIENT_CONNECTED);
}
}
}
}
//sender can be null
void EntityList::QueueClients(
Mob *sender, const EQApplicationPacket *app,
bool ignore_sender, bool ackreq
)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *ent = it->second;
if ((!ignore_sender || ent != sender))
ent->QueuePacket(app, ackreq, Client::CLIENT_CONNECTED);
++it;
}
}
void EntityList::QueueManaged(Mob *sender, const EQApplicationPacket *app,
bool ignore_sender, bool ackreq)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *ent = it->second;
if ((!ignore_sender || ent != sender))
ent->QueuePacket(app, ackreq, Client::CLIENT_CONNECTED);
++it;
}
}
void EntityList::QueueClientsStatus(Mob *sender, const EQApplicationPacket *app,
bool ignore_sender, uint8 minstatus, uint8 maxstatus)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if ((!ignore_sender || it->second != sender) &&
(it->second->Admin() >= minstatus && it->second->Admin() <= maxstatus))
it->second->QueuePacket(app);
++it;
}
}
void EntityList::DuelMessage(Mob *winner, Mob *loser, bool flee)
{
if (winner->GetLevelCon(winner->GetLevel(), loser->GetLevel()) > 2) {
std::vector<EQ::Any> args;
args.push_back(winner);
args.push_back(loser);
parse->EventPlayer(EVENT_DUEL_WIN, winner->CastToClient(), loser->GetName(), loser->CastToClient()->CharacterID(), &args);
parse->EventPlayer(EVENT_DUEL_LOSE, loser->CastToClient(), winner->GetName(), winner->CastToClient()->CharacterID(), &args);
}
auto it = client_list.begin();
while (it != client_list.end()) {
Client *cur = it->second;
//might want some sort of distance check in here?
if (cur != winner && cur != loser) {
if (flee)
cur->MessageString(Chat::Yellow, DUEL_FLED, winner->GetName(),loser->GetName(),loser->GetName());
else
cur->MessageString(Chat::Yellow, DUEL_FINISHED, winner->GetName(),loser->GetName());
}
++it;
}
}
Client *EntityList::GetClientByName(const char *checkname)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (strcasecmp(it->second->GetName(), checkname) == 0)
return it->second;
++it;
}
return nullptr;
}
Client *EntityList::GetClientByCharID(uint32 iCharID)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->CharacterID() == iCharID)
return it->second;
++it;
}
return nullptr;
}
Client *EntityList::GetClientByWID(uint32 iWID)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->GetWID() == iWID) {
return it->second;
}
++it;
}
return nullptr;
}
Client *EntityList::GetClientByLSID(uint32 iLSID)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->LSAccountID() == iLSID) {
return it->second;
}
++it;
}
return nullptr;
}
Client *EntityList::GetRandomClient(const glm::vec3& location, float Distance, Client *ExcludeClient)
{
std::vector<Client *> ClientsInRange;
for (auto it = client_list.begin();it != client_list.end(); ++it)
if ((it->second != ExcludeClient) && (DistanceSquared(static_cast<glm::vec3>(it->second->GetPosition()), location) <= Distance))
ClientsInRange.push_back(it->second);
if (ClientsInRange.empty())
return nullptr;
return ClientsInRange[zone->random.Int(0, ClientsInRange.size() - 1)];
}
Corpse *EntityList::GetCorpseByOwner(Client *client)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->IsPlayerCorpse())
if (strcasecmp(it->second->GetOwnerName(), client->GetName()) == 0)
return it->second;
++it;
}
return nullptr;
}
Corpse *EntityList::GetCorpseByOwnerWithinRange(Client *client, Mob *center, int range)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->IsPlayerCorpse())
if (DistanceSquaredNoZ(center->GetPosition(), it->second->GetPosition()) < range &&
strcasecmp(it->second->GetOwnerName(), client->GetName()) == 0)
return it->second;
++it;
}
return nullptr;
}
Corpse *EntityList::GetCorpseByDBID(uint32 dbid)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->GetCorpseDBID() == dbid)
return it->second;
++it;
}
return nullptr;
}
Corpse *EntityList::GetCorpseByName(const char *name)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (strcmp(it->second->GetName(), name) == 0)
return it->second;
++it;
}
return nullptr;
}
Spawn2 *EntityList::GetSpawnByID(uint32 id)
{
if (!zone || !zone->IsLoaded())
return nullptr;
LinkedListIterator<Spawn2 *> iterator(zone->spawn2_list);
iterator.Reset();
while(iterator.MoreElements())
{
if(iterator.GetData()->GetID() == id) {
return iterator.GetData();
}
iterator.Advance();
}
return nullptr;
}
void EntityList::RemoveAllCorpsesByCharID(uint32 charid)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->GetCharID() == charid) {
safe_delete(it->second);
free_ids.push(it->first);
it = corpse_list.erase(it);
} else {
++it;
}
}
}
void EntityList::RemoveCorpseByDBID(uint32 dbid)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->GetCorpseDBID() == dbid) {
safe_delete(it->second);
free_ids.push(it->first);
it = corpse_list.erase(it);
} else {
++it;
}
}
}
int EntityList::RezzAllCorpsesByCharID(uint32 charid)
{
int RezzExp = 0;
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->GetCharID() == charid) {
RezzExp += it->second->GetRezExp();
it->second->IsRezzed(true);
it->second->CompleteResurrection();
}
++it;
}
return RezzExp;
}
Group *EntityList::GetGroupByMob(Mob *mob)
{
std::list<Group *>::iterator iterator;
iterator = group_list.begin();
while (iterator != group_list.end()) {
if ((*iterator)->IsGroupMember(mob))
return *iterator;
++iterator;
}
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
return nullptr;
}
Group *EntityList::GetGroupByLeaderName(const char *leader)
{
std::list<Group *>::iterator iterator;
iterator = group_list.begin();
while (iterator != group_list.end()) {
if (!strcmp((*iterator)->GetLeaderName(), leader))
return *iterator;
++iterator;
}
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
return nullptr;
}
Group *EntityList::GetGroupByID(uint32 group_id)
{
std::list<Group *>::iterator iterator;
iterator = group_list.begin();
while (iterator != group_list.end()) {
if ((*iterator)->GetID() == group_id)
return *iterator;
++iterator;
}
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
return nullptr;
}
Group *EntityList::GetGroupByClient(Client *client)
{
std::list <Group *>::iterator iterator;
iterator = group_list.begin();
while (iterator != group_list.end()) {
if ((*iterator)->IsGroupMember(client->CastToMob()))
return *iterator;
++iterator;
}
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
return nullptr;
}
Raid *EntityList::GetRaidByLeaderName(const char *leader)
{
std::list<Raid *>::iterator iterator;
iterator = raid_list.begin();
while (iterator != raid_list.end()) {
if ((*iterator)->GetLeader())
if(strcmp((*iterator)->GetLeader()->GetName(), leader) == 0)
return *iterator;
++iterator;
}
return nullptr;
}
Raid *EntityList::GetRaidByID(uint32 id)
{
std::list<Raid *>::iterator iterator;
iterator = raid_list.begin();
while (iterator != raid_list.end()) {
if ((*iterator)->GetID() == id)
return *iterator;
++iterator;
}
return nullptr;
}
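// Returns the raid this client belongs to, caching the result on the client
// (p_raid_instance) so repeated lookups avoid rescanning the raid list.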
Raid *EntityList::GetRaidByClient(Client* client)
{
if (client->p_raid_instance) {
return client->p_raid_instance;
}
std::list<Raid *>::iterator iterator;
iterator = raid_list.begin();
while (iterator != raid_list.end()) {
for (auto &member : (*iterator)->members) {
if (member.member) {
if (member.member == client) {
client->p_raid_instance = *iterator;
return *iterator;
}
}
}
++iterator;
}
return nullptr;
}
Raid *EntityList::GetRaidByMob(Mob *mob)
{
std::list<Raid *>::iterator iterator;
iterator = raid_list.begin();
while (iterator != raid_list.end()) {
for(int x = 0; x < MAX_RAID_MEMBERS; x++) {
// TODO: Implement support for Mob objects in Raid class
/*if((*iterator)->members[x].member){
if((*iterator)->members[x].member == mob)
return *iterator;
}*/
}
++iterator;
}
return nullptr;
}
Client *EntityList::GetClientByAccID(uint32 accid)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->AccountID() == accid)
return it->second;
++it;
}
return nullptr;
}
void EntityList::ChannelMessageFromWorld(const char *from, const char *to,
uint8 chan_num, uint32 guild_id, uint8 language, uint8 lang_skill, const char *message)
{
for (auto it = client_list.begin(); it != client_list.end(); ++it) {
Client *client = it->second;
if (chan_num == ChatChannel_Guild) {
if (!client->IsInGuild(guild_id))
continue;
if (!guild_mgr.CheckPermission(guild_id, client->GuildRank(), GUILD_HEAR))
continue;
if (client->GetFilter(FilterGuildChat) == FilterHide)
continue;
} else if (chan_num == ChatChannel_OOC) {
if (client->GetFilter(FilterOOC) == FilterHide)
continue;
}
client->ChannelMessageSend(from, to, chan_num, language, lang_skill, message);
}
}
void EntityList::Message(uint32 to_guilddbid, uint32 type, const char *message, ...)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4096, message, argptr);
va_end(argptr);
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if (to_guilddbid == 0 || client->IsInGuild(to_guilddbid))
client->Message(type, buffer);
++it;
}
}
void EntityList::QueueClientsGuild(Mob *sender, const EQApplicationPacket *app,
bool ignore_sender, uint32 guild_id)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if (client->IsInGuild(guild_id))
client->QueuePacket(app);
++it;
}
}
void EntityList::QueueClientsGuildBankItemUpdate(const GuildBankItemUpdate_Struct *gbius, uint32 GuildID)
{
auto outapp = new EQApplicationPacket(OP_GuildBank, sizeof(GuildBankItemUpdate_Struct));
GuildBankItemUpdate_Struct *outgbius = (GuildBankItemUpdate_Struct*)outapp->pBuffer;
memcpy(outgbius, gbius, sizeof(GuildBankItemUpdate_Struct));
const EQ::ItemData *Item = database.GetItem(gbius->ItemID);
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if (client->IsInGuild(GuildID)) {
if (Item && (gbius->Permissions == GuildBankPublicIfUsable))
outgbius->Useable = Item->IsEquipable(client->GetBaseRace(), client->GetBaseClass());
client->QueuePacket(outapp);
}
++it;
}
safe_delete(outapp);
}
void EntityList::MessageStatus(uint32 to_guild_id, int to_minstatus, uint32 type, const char *message, ...)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4096, message, argptr);
va_end(argptr);
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if ((to_guild_id == 0 || client->IsInGuild(to_guild_id)) && client->Admin() >= to_minstatus) {
client->Message(type, buffer);
}
++it;
}
}
/**
* @param sender
* @param skipsender
* @param dist
* @param type
* @param string_id
* @param message1
* @param message2
* @param message3
* @param message4
* @param message5
* @param message6
* @param message7
* @param message8
* @param message9
*/
void EntityList::MessageCloseString(
Mob *sender,
bool skipsender,
float dist,
uint32 type,
uint32 string_id,
const char *message1,
const char *message2,
const char *message3,
const char *message4,
const char *message5,
const char *message6,
const char *message7,
const char *message8,
const char *message9
)
{
Client *c;
float dist2 = dist * dist;
for (auto & it : client_list) {
c = it.second;
if (c && DistanceSquared(c->GetPosition(), sender->GetPosition()) <= dist2 && (!skipsender || c != sender)) {
c->MessageString(
type,
string_id,
message1,
message2,
message3,
message4,
message5,
message6,
message7,
message8,
message9
);
}
}
}
/**
* @param sender
* @param skipsender
* @param dist
* @param type
* @param filter
* @param string_id
* @param message1
* @param message2
* @param message3
* @param message4
* @param message5
* @param message6
* @param message7
* @param message8
* @param message9
*/
void EntityList::FilteredMessageCloseString(
Mob *sender, bool skipsender,
float dist,
uint32 type,
eqFilterType filter,
uint32 string_id,
const char *message1,
const char *message2,
const char *message3,
const char *message4,
const char *message5,
const char *message6,
const char *message7,
const char *message8,
const char *message9
)
{
Client *c;
float dist2 = dist * dist;
for (auto & it : client_list) {
c = it.second;
if (c && DistanceSquared(c->GetPosition(), sender->GetPosition()) <= dist2 && (!skipsender || c != sender)) {
c->FilteredMessageString(
sender, type, filter, string_id,
message1, message2, message3, message4, message5,
message6, message7, message8, message9
);
}
}
}
/**
*
* @param sender
* @param skipsender
* @param type
* @param string_id
* @param message1
* @param message2
* @param message3
* @param message4
* @param message5
* @param message6
* @param message7
* @param message8
* @param message9
*/
void EntityList::MessageString(
Mob *sender,
bool skipsender,
uint32 type,
uint32 string_id,
const char *message1,
const char *message2,
const char *message3,
const char *message4,
const char *message5,
const char *message6,
const char *message7,
const char *message8,
const char *message9
)
{
Client *c;
for (auto & it : client_list) {
c = it.second;
if (c && (!skipsender || c != sender)) {
c->MessageString(
type,
string_id,
message1,
message2,
message3,
message4,
message5,
message6,
message7,
message8,
message9
);
}
}
}
/**
*
* @param sender
* @param skipsender
* @param type
* @param filter
* @param string_id
* @param message1
* @param message2
* @param message3
* @param message4
* @param message5
* @param message6
* @param message7
* @param message8
* @param message9
*/
void EntityList::FilteredMessageString(
Mob *sender,
bool skipsender,
uint32 type,
eqFilterType filter,
uint32 string_id,
const char *message1,
const char *message2,
const char *message3,
const char *message4,
const char *message5,
const char *message6,
const char *message7,
const char *message8,
const char *message9
)
{
Client *c;
for (auto & it : client_list) {
c = it.second;
if (c && (!skipsender || c != sender)) {
c->FilteredMessageString(
sender,
type,
filter,
string_id,
message1,
message2,
message3,
message4,
message5,
message6,
message7,
message8,
message9
);
}
}
}
/**
* @param sender
* @param skipsender
* @param dist
* @param type
* @param message
* @param ...
*/
void EntityList::MessageClose(Mob *sender, bool skipsender, float dist, uint32 type, const char *message, ...)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4095, message, argptr);
va_end(argptr);
float dist2 = dist * dist;
auto it = client_list.begin();
while (it != client_list.end()) {
if (DistanceSquared(it->second->GetPosition(), sender->GetPosition()) <= dist2 &&
(!skipsender || it->second != sender)) {
it->second->Message(type, buffer);
}
++it;
}
}
void EntityList::FilteredMessageClose(
Mob *sender,
bool skipsender,
float dist,
uint32 type,
eqFilterType filter,
const char *message,
...
)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4095, message, argptr);
va_end(argptr);
float dist2 = dist * dist;
auto it = client_list.begin();
while (it != client_list.end()) {
if (DistanceSquared(it->second->GetPosition(), sender->GetPosition()) <= dist2 &&
(!skipsender || it->second != sender)) {
it->second->FilteredMessage(sender, type, filter, buffer);
}
++it;
}
}
void EntityList::RemoveAllMobs()
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
it = mob_list.erase(it);
}
}
void EntityList::RemoveAllClients()
{
// doesn't clear the data
client_list.clear();
}
void EntityList::RemoveAllNPCs()
{
// doesn't clear the data
npc_list.clear();
npc_limit_list.clear();
}
void EntityList::RemoveAllMercs()
{
// doesn't clear the data
merc_list.clear();
}
void EntityList::RemoveAllGroups()
{
while (group_list.size()) {
auto group = group_list.front();
group_list.pop_front();
safe_delete(group);
}
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
}
void EntityList::RemoveAllRaids()
{
while (raid_list.size()) {
auto raid = raid_list.front();
raid_list.pop_front();
safe_delete(raid);
}
}
void EntityList::RemoveAllDoors()
{
auto it = door_list.begin();
while (it != door_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
it = door_list.erase(it);
}
DespawnAllDoors();
}
void EntityList::DespawnAllDoors()
{
auto outapp = new EQApplicationPacket(OP_RemoveAllDoors, 0);
for (auto it = client_list.begin(); it != client_list.end(); ++it) {
if (it->second) {
it->second->QueuePacket(outapp, true, Client::CLIENT_CONNECTED);
}
}
safe_delete(outapp);
}
void EntityList::RespawnAllDoors()
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second) {
auto outapp = new EQApplicationPacket();
MakeDoorSpawnPacket(outapp, it->second);
it->second->FastQueuePacket(&outapp, true, Client::CLIENT_CONNECTED);
}
++it;
}
}
void EntityList::RemoveAllCorpses()
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
it = corpse_list.erase(it);
}
}
void EntityList::RemoveAllObjects()
{
auto it = object_list.begin();
while (it != object_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
it = object_list.erase(it);
}
}
void EntityList::RemoveAllTraps()
{
auto it = trap_list.begin();
while (it != trap_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
it = trap_list.erase(it);
}
}
void EntityList::RemoveAllEncounters()
{
auto it = encounter_list.begin();
while (it != encounter_list.end()) {
parse->RemoveEncounter(it->second->GetEncounterName());
safe_delete(it->second);
free_ids.push(it->first);
it = encounter_list.erase(it);
}
}
/**
* @param delete_id
* @return
*/
bool EntityList::RemoveMob(uint16 delete_id)
{
if (delete_id == 0) {
return true;
}
auto it = mob_list.find(delete_id);
if (it != mob_list.end()) {
if (npc_list.count(delete_id)) {
entity_list.RemoveNPC(delete_id);
}
else if (client_list.count(delete_id)) {
entity_list.RemoveClient(delete_id);
}
safe_delete(it->second);
if (!corpse_list.count(delete_id)) {
free_ids.push(it->first);
}
mob_list.erase(it);
return true;
}
return false;
}
/**
* @param delete_mob
* @return
*/
bool EntityList::RemoveMob(Mob *delete_mob)
{
if (delete_mob == 0) {
return true;
}
auto it = mob_list.begin();
while (it != mob_list.end()) {
if (it->second == delete_mob) {
safe_delete(it->second);
if (!corpse_list.count(it->first)) {
free_ids.push(it->first);
}
mob_list.erase(it);
return true;
}
++it;
}
return false;
}
/**
* @param delete_id
* @return
*/
bool EntityList::RemoveNPC(uint16 delete_id)
{
auto it = npc_list.find(delete_id);
if (it != npc_list.end()) {
NPC *npc = it->second;
RemoveProximity(delete_id);
npc_list.erase(it);
if (npc_limit_list.count(delete_id)) {
npc_limit_list.erase(delete_id);
}
return true;
}
return false;
}
/**
* @param mob
* @return
*/
bool EntityList::RemoveMobFromCloseLists(Mob *mob)
{
uint16 entity_id = mob->GetID() > 0 ? mob->GetID() : mob->GetInitialId();
LogEntityManagement(
"Attempting to remove mob [{}] from close lists entity_id ({})",
mob->GetCleanName(),
entity_id
);
auto it = mob_list.begin();
while (it != mob_list.end()) {
LogEntityManagement(
"Removing mob [{}] from [{}] close list entity_id ({})",
mob->GetCleanName(),
it->second->GetCleanName(),
entity_id
);
it->second->close_mobs.erase(entity_id);
++it;
}
return false;
}
/**
 * @param aura
 */
void EntityList::RemoveAuraFromMobs(Mob *aura)
{
LogEntityManagement(
"Attempting to remove aura [{}] from mobs entity_id ({})",
aura->GetCleanName(),
aura->GetID()
);
for (auto &it : mob_list) {
auto mob = it.second;
mob->RemoveAura(aura->GetID());
}
}
/**
* The purpose of this system is so that we cache relevant entities that are "close"
*
 * In general, it becomes incredibly expensive to run zone-wide checks against every single mob in the zone when in reality
 * we only care about the entities closest to us
*
* A very simple example of where this is relevant is Aggro, the below example is skewed because the overall implementation
* of Aggro was also tweaked in conjunction with close lists. We also scan more aggressively when entities are moving (1-6 seconds)
 * versus 60 seconds when idle. Entities that are moving also add themselves to the close lists of those nearest to them so that
 * those lists always remain up to date
*
* Before: Aggro checks for NPC to Client aggro | (40 clients in zone) x (525 npcs) x 2 (times a second) = 2,520,000 checks a minute
* After: Aggro checks for NPC to Client aggro | (40 clients in zone) x (20-30 npcs) x 2 (times a second) = 144,000 checks a minute (This is actually far less today)
*
* Places in the code where this logic makes a huge impact
*
* Aggro checks (zone wide -> close)
* Aura processing (zone wide -> close)
* AE Taunt (zone wide -> close)
* AOE Spells (zone wide -> close)
* Bard Pulse AOE (zone wide -> close)
* Mass Group Buff (zone wide -> close)
* AE Attack (zone wide -> close)
* Packet QueueCloseClients (zone wide -> close)
* Check Close Beneficial Spells (Buffs; should I heal other npcs) (zone wide -> close)
* AI Yell for Help (NPC Assist other NPCs) (zone wide -> close)
*
 * All of the above makes a tremendous impact on the bottom line of CPU cycle performance because we run an order of magnitude
 * fewer checks by focusing our hot path logic down to a very small subset of relevant entities instead of looping over an entire
* entity list (zone wide)
*
* @param close_mobs
 * @param scanning_mob
 * @param add_self_to_other_lists
 */
void EntityList::ScanCloseMobs(
std::unordered_map<uint16, Mob *> &close_mobs,
Mob *scanning_mob,
bool add_self_to_other_lists
)
{
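	// the rule value is a linear distance; square it once so it can be compared against DistanceSquared() below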
float scan_range = RuleI(Range, MobCloseScanDistance) * RuleI(Range, MobCloseScanDistance);
close_mobs.clear();
for (auto &e : mob_list) {
auto mob = e.second;
if (!mob->IsNPC() && !mob->IsClient()) {
continue;
}
if (mob->GetID() <= 0) {
continue;
}
float distance = DistanceSquared(scanning_mob->GetPosition(), mob->GetPosition());
if (distance <= scan_range || mob->GetAggroRange() >= scan_range) {
close_mobs.insert(std::pair<uint16, Mob *>(mob->GetID(), mob));
if (add_self_to_other_lists && scanning_mob->GetID() > 0) {
bool has_mob = false;
for (auto &cm: mob->close_mobs) {
if (scanning_mob->GetID() == cm.first) {
has_mob = true;
break;
}
}
if (!has_mob) {
mob->close_mobs.insert(std::pair<uint16, Mob *>(scanning_mob->GetID(), scanning_mob));
}
}
}
}
LogAIScanClose(
"[{}] Scanning Close List | list_size [{}] moving [{}]",
scanning_mob->GetCleanName(),
close_mobs.size(),
scanning_mob->IsMoving() ? "true" : "false"
);
}
bool EntityList::RemoveMerc(uint16 delete_id)
{
auto it = merc_list.find(delete_id);
if (it != merc_list.end()) {
merc_list.erase(it); // Already Deleted
return true;
}
return false;
}
bool EntityList::RemoveClient(uint16 delete_id)
{
auto it = client_list.find(delete_id);
if (it != client_list.end()) {
client_list.erase(it); // Already deleted
return true;
}
return false;
}
// If our ID was deleted already
bool EntityList::RemoveClient(Client *delete_client)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second == delete_client) {
client_list.erase(it);
return true;
}
++it;
}
return false;
}
bool EntityList::RemoveObject(uint16 delete_id)
{
auto it = object_list.find(delete_id);
if (it != object_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
object_list.erase(it);
return true;
}
return false;
}
bool EntityList::RemoveTrap(uint16 delete_id)
{
auto it = trap_list.find(delete_id);
if (it != trap_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
trap_list.erase(it);
return true;
}
return false;
}
bool EntityList::RemoveDoor(uint16 delete_id)
{
auto it = door_list.find(delete_id);
if (it != door_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
door_list.erase(it);
return true;
}
return false;
}
bool EntityList::RemoveCorpse(uint16 delete_id)
{
auto it = corpse_list.find(delete_id);
if (it != corpse_list.end()) {
safe_delete(it->second);
free_ids.push(it->first);
corpse_list.erase(it);
return true;
}
return false;
}
bool EntityList::RemoveGroup(uint32 delete_id)
{
auto it = std::find_if(group_list.begin(), group_list.end(),
[delete_id](const Group *a) { return a->GetID() == delete_id; });
if (it == group_list.end()) {
#if EQDEBUG >= 5
CheckGroupList (__FILE__, __LINE__);
#endif
return false;
}
auto group = *it;
group_list.erase(it);
safe_delete(group);
return true;
}
bool EntityList::RemoveRaid(uint32 delete_id)
{
auto it = std::find_if(raid_list.begin(), raid_list.end(),
[delete_id](const Raid *a) { return a->GetID() == delete_id; });
if (it == raid_list.end())
return false;
auto raid = *it;
raid_list.erase(it);
safe_delete(raid);
return true;
}
void EntityList::Clear()
{
RemoveAllClients();
entity_list.RemoveAllTraps(); //we can have child npcs so we go first
entity_list.RemoveAllNPCs();
entity_list.RemoveAllMobs();
entity_list.RemoveAllCorpses();
entity_list.RemoveAllGroups();
entity_list.RemoveAllDoors();
entity_list.RemoveAllObjects();
entity_list.RemoveAllRaids();
entity_list.RemoveAllLocalities();
}
void EntityList::UpdateWho(bool iSendFullUpdate)
{
if ((!worldserver.Connected()) || !is_zone_loaded)
return;
uint32 tmpNumUpdates = numclients + 5;
ServerPacket* pack = 0;
ServerClientListKeepAlive_Struct* sclka = 0;
if (!iSendFullUpdate) {
pack = new ServerPacket(ServerOP_ClientListKA, sizeof(ServerClientListKeepAlive_Struct) + (tmpNumUpdates * 4));
sclka = (ServerClientListKeepAlive_Struct*) pack->pBuffer;
}
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->InZone()) {
if (iSendFullUpdate) {
it->second->UpdateWho();
} else {
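				// grow the keep-alive buffer (allocate-and-copy) when more in-zone clients are found than the initial estimate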
if (sclka->numupdates >= tmpNumUpdates) {
tmpNumUpdates += 10;
uint8* tmp = pack->pBuffer;
pack->pBuffer = new uint8[sizeof(ServerClientListKeepAlive_Struct) + (tmpNumUpdates * 4)];
memset(pack->pBuffer, 0, sizeof(ServerClientListKeepAlive_Struct) + (tmpNumUpdates * 4));
memcpy(pack->pBuffer, tmp, pack->size);
pack->size = sizeof(ServerClientListKeepAlive_Struct) + (tmpNumUpdates * 4);
safe_delete_array(tmp);
sclka = (ServerClientListKeepAlive_Struct*) pack->pBuffer;
}
sclka->wid[sclka->numupdates] = it->second->GetWID();
sclka->numupdates++;
}
}
++it;
}
if (!iSendFullUpdate) {
pack->size = sizeof(ServerClientListKeepAlive_Struct) + (sclka->numupdates * 4);
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void EntityList::RemoveEntity(uint16 id)
{
if (id == 0)
return;
if (entity_list.RemoveMob(id))
return;
else if (entity_list.RemoveCorpse(id))
return;
else if (entity_list.RemoveDoor(id))
return;
else if (entity_list.RemoveGroup(id))
return;
else if (entity_list.RemoveTrap(id))
return;
else if (entity_list.RemoveMerc(id))
return;
#ifdef BOTS
// This block of code is necessary to clean up bot objects
else if (entity_list.RemoveBot(id))
return;
#endif //BOTS
else
entity_list.RemoveObject(id);
}
void EntityList::Process()
{
CheckSpawnQueue();
}
void EntityList::CountNPC(uint32 *NPCCount, uint32 *NPCLootCount, uint32 *gmspawntype_count)
{
*NPCCount = 0;
*NPCLootCount = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
(*NPCCount)++;
(*NPCLootCount) += it->second->CountLoot();
if (it->second->GetNPCTypeID() == 0)
(*gmspawntype_count)++;
++it;
}
}
void EntityList::Depop(bool StartSpawnTimer)
{
for (auto it = npc_list.begin(); it != npc_list.end(); ++it) {
NPC *pnpc = it->second;
if (pnpc) {
Mob *own = pnpc->GetOwner();
//do not depop player's pets...
if (own && own->IsClient())
continue;
if (pnpc->IsHorse())
continue;
if (pnpc->IsFindable())
UpdateFindableNPCState(pnpc, true);
pnpc->WipeHateList();
pnpc->Depop(StartSpawnTimer);
}
}
}
void EntityList::DepopAll(int NPCTypeID, bool StartSpawnTimer)
{
for (auto it = npc_list.begin(); it != npc_list.end(); ++it) {
NPC *pnpc = it->second;
if (pnpc && (pnpc->GetNPCTypeID() == (uint32)NPCTypeID))
pnpc->Depop(StartSpawnTimer);
}
}
void EntityList::SendTraders(Client *client)
{
Client *trader = nullptr;
auto it = client_list.begin();
while (it != client_list.end()) {
trader = it->second;
if (trader->IsTrader())
client->SendTraderPacket(trader);
if (trader->IsBuyer())
client->SendBuyerPacket(trader);
++it;
}
}
void EntityList::RemoveFromHateLists(Mob *mob, bool settoone)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CheckAggro(mob)) {
if (!settoone) {
it->second->RemoveFromHateList(mob);
if (mob->IsClient())
mob->CastToClient()->RemoveXTarget(it->second, false); // gotta do book keeping
} else {
it->second->SetHateAmountOnEnt(mob, 1);
}
}
++it;
}
}
void EntityList::RemoveDebuffs(Mob *caster)
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
it->second->BuffFadeDetrimentalByCaster(caster);
++it;
}
}
char *EntityList::MakeNameUnique(char *name)
{
bool used[300];
memset(used, 0, sizeof(used));
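	// used[] tracks which numeric suffixes (000-299) are already taken by mobs sharing this base name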
name[61] = 0; name[62] = 0; name[63] = 0;
int len = strlen(name);
auto it = mob_list.begin();
while (it != mob_list.end()) {
if (it->second->IsMob()) {
if (strncasecmp(it->second->CastToMob()->GetName(), name, len) == 0) {
if (Seperator::IsNumber(&it->second->CastToMob()->GetName()[len])) {
used[atoi(&it->second->CastToMob()->GetName()[len])] = true;
}
}
}
++it;
}
for (int i=0; i < 300; i++) {
if (!used[i]) {
#ifdef _WINDOWS
snprintf(name, 64, "%s%03d", name, i);
#else
//glibc clears destination of snprintf
//make a copy of name before snprintf--misanthropicfiend
char temp_name[64];
strn0cpy(temp_name, name, 64);
snprintf(name, 64, "%s%03d", temp_name, i);
#endif
return name;
}
}
LogError("Fatal error in EntityList::MakeNameUnique: Unable to find unique name for [{}]", name);
char tmp[64] = "!";
strn0cpy(&tmp[1], name, sizeof(tmp) - 1);
strcpy(name, tmp);
return MakeNameUnique(name);
}
char *EntityList::RemoveNumbers(char *name)
{
char tmp[64];
memset(tmp, 0, sizeof(tmp));
int k = 0;
for (unsigned int i=0; i<strlen(name) && i<sizeof(tmp); i++) {
if (name[i] < '0' || name[i] > '9')
tmp[k++] = name[i];
}
strn0cpy(name, tmp, sizeof(tmp));
return name;
}
void EntityList::ListNPCCorpses(Client *client)
{
uint32 x = 0;
auto it = corpse_list.begin();
client->Message(Chat::White, "NPC Corpses in the zone:");
while (it != corpse_list.end()) {
if (it->second->IsNPCCorpse()) {
client->Message(Chat::White, " %5d: %s", it->first, it->second->GetName());
x++;
}
++it;
}
client->Message(Chat::White, "%d npc corpses listed.", x);
}
void EntityList::ListPlayerCorpses(Client *client)
{
uint32 x = 0;
auto it = corpse_list.begin();
client->Message(Chat::White, "Player Corpses in the zone:");
while (it != corpse_list.end()) {
if (it->second->IsPlayerCorpse()) {
client->Message(Chat::White, " %5d: %s", it->first, it->second->GetName());
x++;
}
++it;
}
client->Message(Chat::White, "%d player corpses listed.", x);
}
// returns the number of corpses deleted. A negative number indicates an error code.
int32 EntityList::DeleteNPCCorpses()
{
int32 x = 0;
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->IsNPCCorpse()) {
it->second->DepopNPCCorpse();
x++;
}
++it;
}
return x;
}
void EntityList::CorpseFix(Client* c)
{
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
Corpse* corpse = it->second;
if (corpse->IsNPCCorpse()) {
if (DistanceNoZ(c->GetPosition(), corpse->GetPosition()) < 100) {
c->Message(Chat::Yellow, "Attempting to fix %s", it->second->GetCleanName());
corpse->GMMove(corpse->GetX(), corpse->GetY(), c->GetZ() + 2, 0);
}
}
++it;
}
}
// returns the number of corpses deleted. A negative number indicates an error code.
int32 EntityList::DeletePlayerCorpses()
{
int32 x = 0;
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
if (it->second->IsPlayerCorpse()) {
it->second->CastToCorpse()->Delete();
x++;
}
++it;
}
return x;
}
void EntityList::SendPetitionToAdmins()
{
auto outapp = new EQApplicationPacket(OP_PetitionUpdate, sizeof(PetitionUpdate_Struct));
PetitionUpdate_Struct *pcus = (PetitionUpdate_Struct*) outapp->pBuffer;
pcus->petnumber = 0; // Petition Number
pcus->color = 0;
pcus->status = 0xFFFFFFFF;
pcus->senttime = 0;
strcpy(pcus->accountid, "");
strcpy(pcus->gmsenttoo, "");
pcus->quetotal=0;
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->CastToClient()->Admin() >= 80)
it->second->CastToClient()->QueuePacket(outapp);
++it;
}
safe_delete(outapp);
}
void EntityList::SendPetitionToAdmins(Petition *pet)
{
auto outapp = new EQApplicationPacket(OP_PetitionUpdate, sizeof(PetitionUpdate_Struct));
PetitionUpdate_Struct *pcus = (PetitionUpdate_Struct*) outapp->pBuffer;
pcus->petnumber = pet->GetID(); // Petition Number
if (pet->CheckedOut()) {
pcus->color = 0x00;
pcus->status = 0xFFFFFFFF;
pcus->senttime = pet->GetSentTime();
strcpy(pcus->accountid, "");
strcpy(pcus->gmsenttoo, "");
} else {
pcus->color = pet->GetUrgency(); // 0x00 = green, 0x01 = yellow, 0x02 = red
pcus->status = pet->GetSentTime();
pcus->senttime = pet->GetSentTime(); // 4 has to be 0x1F
strcpy(pcus->accountid, pet->GetAccountName());
strcpy(pcus->charname, pet->GetCharName());
}
pcus->quetotal = petition_list.GetTotalPetitions();
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->CastToClient()->Admin() >= 80) {
if (pet->CheckedOut())
strcpy(pcus->gmsenttoo, "");
else
strcpy(pcus->gmsenttoo, it->second->CastToClient()->GetName());
it->second->CastToClient()->QueuePacket(outapp);
}
++it;
}
safe_delete(outapp);
}
void EntityList::ClearClientPetitionQueue()
{
auto outapp = new EQApplicationPacket(OP_PetitionUpdate, sizeof(PetitionUpdate_Struct));
PetitionUpdate_Struct *pet = (PetitionUpdate_Struct*) outapp->pBuffer;
pet->color = 0x00;
pet->status = 0xFFFFFFFF;
pet->senttime = 0;
strcpy(pet->accountid, "");
strcpy(pet->gmsenttoo, "");
strcpy(pet->charname, "");
pet->quetotal = petition_list.GetTotalPetitions();
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->CastToClient()->Admin() >= 100) {
int x = 0;
for (x = 0; x < 64; x++) {
pet->petnumber = x;
it->second->CastToClient()->QueuePacket(outapp);
}
}
++it;
}
safe_delete(outapp);
return;
}
void EntityList::WriteEntityIDs()
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
std::cout << "ID: " << it->first << " Name: " << it->second->GetName() << std::endl;
++it;
}
}
BulkZoneSpawnPacket::BulkZoneSpawnPacket(Client *iSendTo, uint32 iMaxSpawnsPerPacket)
{
data = nullptr;
pSendTo = iSendTo;
pMaxSpawnsPerPacket = iMaxSpawnsPerPacket;
}
BulkZoneSpawnPacket::~BulkZoneSpawnPacket()
{
SendBuffer();
safe_delete_array(data);
}
bool BulkZoneSpawnPacket::AddSpawn(NewSpawn_Struct *ns)
{
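	// returns true when adding this spawn filled the buffer and forced an immediate flush to the client(s)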
if (!data) {
data = new NewSpawn_Struct[pMaxSpawnsPerPacket];
memset(data, 0, sizeof(NewSpawn_Struct) * pMaxSpawnsPerPacket);
index = 0;
}
memcpy(&data[index], ns, sizeof(NewSpawn_Struct));
index++;
if (index >= pMaxSpawnsPerPacket) {
SendBuffer();
return true;
}
return false;
}
void BulkZoneSpawnPacket::SendBuffer()
{
if (!data)
return;
uint32 tmpBufSize = (index * sizeof(NewSpawn_Struct));
auto outapp = new EQApplicationPacket(OP_ZoneSpawns, (unsigned char *)data, tmpBufSize);
if (pSendTo) {
pSendTo->FastQueuePacket(&outapp);
} else {
entity_list.QueueClients(0, outapp);
safe_delete(outapp);
}
memset(data, 0, sizeof(NewSpawn_Struct) * pMaxSpawnsPerPacket);
index = 0;
}
void EntityList::DoubleAggro(Mob *who)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CheckAggro(who))
it->second->SetHateAmountOnEnt(who, it->second->CastToNPC()->GetHateAmount(who),
it->second->CastToNPC()->GetHateAmount(who) * 2);
++it;
}
}
void EntityList::HalveAggro(Mob *who)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CastToNPC()->CheckAggro(who))
it->second->CastToNPC()->SetHateAmountOnEnt(who, it->second->CastToNPC()->GetHateAmount(who) / 2);
++it;
}
}
void EntityList::Evade(Mob *who)
{
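	// flat hate reduction applied to every NPC aggroed on the evader: 13 hate per level, floored at zero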
uint32 flatval = who->GetLevel() * 13;
int amt = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CastToNPC()->CheckAggro(who)) {
amt = it->second->CastToNPC()->GetHateAmount(who);
amt -= flatval;
if (amt > 0)
it->second->CastToNPC()->SetHateAmountOnEnt(who, amt);
else
it->second->CastToNPC()->SetHateAmountOnEnt(who, 0);
}
++it;
}
}
//removes "targ" from all hate lists, including feigned, in the zone
void EntityList::ClearAggro(Mob* targ)
{
Client *c = nullptr;
if (targ->IsClient()) {
c = targ->CastToClient();
}
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CheckAggro(targ)) {
if (c) {
c->RemoveXTarget(it->second, false);
}
it->second->RemoveFromHateList(targ);
}
if (c && it->second->IsOnFeignMemory(c)) {
it->second->RemoveFromFeignMemory(c); //just in case we feigned
c->RemoveXTarget(it->second, false);
}
++it;
}
}
//removes "targ" from all hate lists of mobs that are water only.
void EntityList::ClearWaterAggro(Mob* targ)
{
Client *c = nullptr;
if (targ->IsClient()) {
c = targ->CastToClient();
}
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->IsUnderwaterOnly()) {
if (it->second->CheckAggro(targ)) {
if (c) {
c->RemoveXTarget(it->second, false);
}
it->second->RemoveFromHateList(targ);
}
if (c && it->second->IsOnFeignMemory(c)) {
it->second->RemoveFromFeignMemory(c); //just in case we feigned
c->RemoveXTarget(it->second, false);
}
}
++it;
}
}
void EntityList::ClearFeignAggro(Mob *targ)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
// add Feign Memory check because sometimes weird stuff happens
if (it->second->CheckAggro(targ) || (targ->IsClient() && it->second->IsOnFeignMemory(targ->CastToClient()))) {
if (it->second->GetSpecialAbility(IMMUNE_FEIGN_DEATH)) {
++it;
continue;
}
if (targ->IsClient()) {
std::vector<EQ::Any> args;
args.push_back(it->second);
int i = parse->EventPlayer(EVENT_FEIGN_DEATH, targ->CastToClient(), "", 0, &args);
if (i != 0) {
++it;
continue;
}
if (it->second->IsNPC()) {
int i = parse->EventNPC(EVENT_FEIGN_DEATH, it->second->CastToNPC(), targ, "", 0);
if (i != 0) {
++it;
continue;
}
}
}
it->second->RemoveFromHateList(targ);
if (targ->IsClient()) {
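				// level 35+ NPCs that pass the Roll(60) check remember the feigner in feign memory; otherwise the XTarget entry is cleared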
if (it->second->GetLevel() >= 35 && zone->random.Roll(60))
it->second->AddFeignMemory(targ->CastToClient());
else
targ->CastToClient()->RemoveXTarget(it->second, false);
}
}
++it;
}
}
void EntityList::ClearZoneFeignAggro(Client *targ)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
it->second->RemoveFromFeignMemory(targ);
targ->CastToClient()->RemoveXTarget(it->second, false);
++it;
}
}
void EntityList::AggroZone(Mob *who, uint32 hate)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
it->second->AddToHateList(who, hate);
++it;
}
}
// Signal Quest command function
void EntityList::SignalMobsByNPCID(uint32 snpc, int signal_id)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC *pit = it->second;
if (pit->GetNPCTypeID() == snpc)
pit->SignalNPC(signal_id);
++it;
}
}
bool EntityList::MakeTrackPacket(Client *client)
{
std::list<std::pair<Mob *, float> > tracking_list;
uint32 distance = 0;
float MobDistance;
if (client->GetClass() == DRUID)
distance = (client->GetSkill(EQ::skills::SkillTracking) * 10);
else if (client->GetClass() == RANGER)
distance = (client->GetSkill(EQ::skills::SkillTracking) * 12);
else if (client->GetClass() == BARD)
distance = (client->GetSkill(EQ::skills::SkillTracking) * 7);
if (distance <= 0)
return false;
if (distance < 300)
distance = 300;
for (auto it = mob_list.cbegin(); it != mob_list.cend(); ++it) {
if (!it->second || it->second == client || !it->second->IsTrackable() ||
it->second->IsInvisible(client))
continue;
MobDistance = DistanceNoZ(it->second->GetPosition(), client->GetPosition());
if (MobDistance > distance)
continue;
tracking_list.push_back(std::make_pair(it->second, MobDistance));
}
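	// sort by spawn timestamp, most recently spawned first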
tracking_list.sort(
[](const std::pair<Mob *, float> &a, const std::pair<Mob *, float> &b) {
return a.first->GetSpawnTimeStamp() > b.first->GetSpawnTimeStamp();
});
auto outapp = new EQApplicationPacket(OP_Track, sizeof(Track_Struct) * tracking_list.size());
Tracking_Struct *outtrack = (Tracking_Struct *)outapp->pBuffer;
outapp->priority = 6;
int index = 0;
for (auto it = tracking_list.cbegin(); it != tracking_list.cend(); ++it, ++index) {
Mob *cur_entity = it->first;
outtrack->Entrys[index].entityid = (uint32)cur_entity->GetID();
outtrack->Entrys[index].distance = it->second;
outtrack->Entrys[index].level = cur_entity->GetLevel();
outtrack->Entrys[index].is_npc = !cur_entity->IsClient();
strn0cpy(outtrack->Entrys[index].name, cur_entity->GetName(), sizeof(outtrack->Entrys[index].name));
outtrack->Entrys[index].is_pet = cur_entity->IsPet();
outtrack->Entrys[index].is_merc = cur_entity->IsMerc();
}
client->QueuePacket(outapp);
safe_delete(outapp);
return true;
}
void EntityList::MessageGroup(Mob *sender, bool skipclose, uint32 type, const char *message, ...)
{
va_list argptr;
char buffer[4096];
va_start(argptr, message);
vsnprintf(buffer, 4095, message, argptr);
va_end(argptr);
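	// despite the name, dist2 is a plain (non-squared) distance threshold: 100 units, or 0 when skipclose is set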
float dist2 = 100;
if (skipclose)
dist2 = 0;
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second != sender &&
(Distance(it->second->GetPosition(), sender->GetPosition()) <= dist2 || it->second->GetGroup() == sender->CastToClient()->GetGroup())) {
it->second->Message(type, buffer);
}
++it;
}
}
bool EntityList::Fighting(Mob *targ)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->CheckAggro(targ))
return true;
++it;
}
return false;
}
void EntityList::AddHealAggro(Mob *target, Mob *caster, uint16 hate)
{
if (hate == 0)
return;
for (auto &e : npc_list) {
auto &npc = e.second;
if (!npc->CheckAggro(target) || npc->IsFeared() || npc->IsPet())
continue;
if (zone->random.Roll(50)) // witness check -- place holder
// This is either a level check (con color check?) or a stat roll
continue;
if ((npc->IsMezzed() || npc->IsStunned()) && hate > 4) // patch notes say stunned/mezzed NPCs get a fraction of the hate
npc->AddToHateList(caster, hate / 4); // made up number
else
npc->AddToHateList(caster, hate);
}
}
void EntityList::OpenDoorsNear(Mob *who)
{
if (!who->CanOpenDoors()) {
return;
}
for (auto &it : door_list) {
Doors *door = it.second;
if (!door || door->IsDoorOpen()) {
continue;
}
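		// squared-distance check: open doors within 10 units horizontally and about 3 units vertically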
auto diff = who->GetPosition() - door->GetPosition();
float distance = diff.x * diff.x + diff.y * diff.y;
if (diff.z * diff.z < 10 && distance <= 100) {
door->Open(who);
}
}
}
void EntityList::SendAlarm(Trap *trap, Mob *currenttarget, uint8 kos)
{
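	// trap->effectvalue is used as the alert radius; square it once for comparison against squared distances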
float preSquareDistance = trap->effectvalue * trap->effectvalue;
for (auto it = npc_list.begin();it != npc_list.end(); ++it) {
NPC *cur = it->second;
auto diff = glm::vec3(cur->GetPosition()) - trap->m_Position;
float curdist = diff.x * diff.x + diff.y * diff.y + diff.z * diff.z;
if (cur->GetOwner() || cur->IsEngaged() || curdist > preSquareDistance )
continue;
if (kos) {
uint8 factioncon = currenttarget->GetReverseFactionCon(cur);
if (factioncon == FACTION_THREATENLY || factioncon == FACTION_SCOWLS) {
cur->AddToHateList(currenttarget,1);
}
}
else
cur->AddToHateList(currenttarget,1);
}
}
void EntityList::AddProximity(NPC *proximity_for)
{
RemoveProximity(proximity_for->GetID());
proximity_list.push_back(proximity_for);
proximity_for->proximity = new NPCProximity; // deleted in NPC::~NPC
}
bool EntityList::RemoveProximity(uint16 delete_npc_id)
{
auto it = std::find_if(proximity_list.begin(), proximity_list.end(),
[delete_npc_id](const NPC *a) { return a->GetID() == delete_npc_id; });
if (it == proximity_list.end())
return false;
proximity_list.erase(it);
return true;
}
void EntityList::RemoveAllLocalities()
{
proximity_list.clear();
}
struct quest_proximity_event {
QuestEventID event_id;
Client *client;
NPC *npc;
int area_id;
int area_type;
};
void EntityList::ProcessMove(Client *c, const glm::vec3& location)
{
float last_x = c->ProximityX();
float last_y = c->ProximityY();
float last_z = c->ProximityZ();
std::list<quest_proximity_event> events;
for (auto iter = proximity_list.begin(); iter != proximity_list.end(); ++iter) {
NPC *d = (*iter);
NPCProximity *l = d->proximity;
if (l == nullptr)
continue;
		//check both bounding boxes; if either coordinate pair
		//crosses a boundary, send the event.
bool old_in = true;
bool new_in = true;
if (last_x < l->min_x || last_x > l->max_x ||
last_y < l->min_y || last_y > l->max_y ||
last_z < l->min_z || last_z > l->max_z) {
old_in = false;
}
if (location.x < l->min_x || location.x > l->max_x ||
location.y < l->min_y || location.y > l->max_y ||
location.z < l->min_z || location.z > l->max_z) {
new_in = false;
}
if (old_in && !new_in) {
quest_proximity_event evt;
evt.event_id = EVENT_EXIT;
evt.client = c;
evt.npc = d;
evt.area_id = 0;
evt.area_type = 0;
events.push_back(evt);
} else if (new_in && !old_in) {
quest_proximity_event evt;
evt.event_id = EVENT_ENTER;
evt.client = c;
evt.npc = d;
evt.area_id = 0;
evt.area_type = 0;
events.push_back(evt);
}
}
for (auto iter = area_list.begin(); iter != area_list.end(); ++iter) {
Area& a = (*iter);
bool old_in = true;
bool new_in = true;
if (last_x < a.min_x || last_x > a.max_x ||
last_y < a.min_y || last_y > a.max_y ||
last_z < a.min_z || last_z > a.max_z) {
old_in = false;
}
if (location.x < a.min_x || location.x > a.max_x ||
location.y < a.min_y || location.y > a.max_y ||
location.z < a.min_z || location.z > a.max_z ) {
new_in = false;
}
if (old_in && !new_in) {
//were in but are no longer.
quest_proximity_event evt;
evt.event_id = EVENT_LEAVE_AREA;
evt.client = c;
evt.npc = nullptr;
evt.area_id = a.id;
evt.area_type = a.type;
events.push_back(evt);
} else if (!old_in && new_in) {
//were not in but now are
quest_proximity_event evt;
evt.event_id = EVENT_ENTER_AREA;
evt.client = c;
evt.npc = nullptr;
evt.area_id = a.id;
evt.area_type = a.type;
events.push_back(evt);
}
}
for (auto iter = events.begin(); iter != events.end(); ++iter) {
quest_proximity_event& evt = (*iter);
if (evt.npc) {
std::vector<EQ::Any> args;
parse->EventNPC(evt.event_id, evt.npc, evt.client, "", 0, &args);
} else {
std::vector<EQ::Any> args;
args.push_back(&evt.area_id);
args.push_back(&evt.area_type);
parse->EventPlayer(evt.event_id, evt.client, "", 0, &args);
}
}
}
void EntityList::ProcessMove(NPC *n, float x, float y, float z)
{
float last_x = n->GetX();
float last_y = n->GetY();
float last_z = n->GetZ();
std::list<quest_proximity_event> events;
for (auto iter = area_list.begin(); iter != area_list.end(); ++iter) {
Area &a = (*iter);
bool old_in = true;
bool new_in = true;
if (last_x < a.min_x || last_x > a.max_x ||
last_y < a.min_y || last_y > a.max_y ||
last_z < a.min_z || last_z > a.max_z) {
old_in = false;
}
if (x < a.min_x || x > a.max_x ||
y < a.min_y || y > a.max_y ||
z < a.min_z || z > a.max_z) {
new_in = false;
}
if (old_in && !new_in) {
//were in but are no longer.
quest_proximity_event evt;
evt.event_id = EVENT_LEAVE_AREA;
evt.client = nullptr;
evt.npc = n;
evt.area_id = a.id;
evt.area_type = a.type;
events.push_back(evt);
}
else if (!old_in && new_in) {
//were not in but now are
quest_proximity_event evt;
evt.event_id = EVENT_ENTER_AREA;
evt.client = nullptr;
evt.npc = n;
evt.area_id = a.id;
evt.area_type = a.type;
events.push_back(evt);
}
}
for (auto iter = events.begin(); iter != events.end(); ++iter) {
quest_proximity_event &evt = (*iter);
std::vector<EQ::Any> args;
args.push_back(&evt.area_id);
args.push_back(&evt.area_type);
parse->EventNPC(evt.event_id, evt.npc, evt.client, "", 0, &args);
}
}
void EntityList::AddArea(int id, int type, float min_x, float max_x, float min_y,
float max_y, float min_z, float max_z)
{
RemoveArea(id);
Area a;
a.id = id;
a.type = type;
if (min_x > max_x) {
a.min_x = max_x;
a.max_x = min_x;
} else {
a.min_x = min_x;
a.max_x = max_x;
}
if (min_y > max_y) {
a.min_y = max_y;
a.max_y = min_y;
} else {
a.min_y = min_y;
a.max_y = max_y;
}
if (min_z > max_z) {
a.min_z = max_z;
a.max_z = min_z;
} else {
a.min_z = min_z;
a.max_z = max_z;
}
area_list.push_back(a);
}
void EntityList::RemoveArea(int id)
{
auto it = std::find_if(area_list.begin(), area_list.end(),
[id](const Area &a) { return a.id == id; });
if (it == area_list.end())
return;
area_list.erase(it);
}
void EntityList::ClearAreas()
{
area_list.clear();
}
void EntityList::ProcessProximitySay(const char *Message, Client *c, uint8 language)
{
if (!Message || !c)
return;
auto iter = proximity_list.begin();
for (; iter != proximity_list.end(); ++iter) {
NPC *d = (*iter);
NPCProximity *l = d->proximity;
if (l == nullptr || !l->say)
continue;
if (c->GetX() < l->min_x || c->GetX() > l->max_x
|| c->GetY() < l->min_y || c->GetY() > l->max_y
|| c->GetZ() < l->min_z || c->GetZ() > l->max_z)
continue;
parse->EventNPC(EVENT_PROXIMITY_SAY, d, c, Message, language);
}
}
void EntityList::SaveAllClientsTaskState()
{
if (!task_manager) {
return;
}
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if (client->IsTaskStateLoaded()) {
client->SaveTaskState();
}
++it;
}
}
void EntityList::ReloadAllClientsTaskState(int task_id)
{
if (!task_manager)
return;
auto it = client_list.begin();
while (it != client_list.end()) {
Client *client = it->second;
if (client->IsTaskStateLoaded()) {
// If we have been passed a TaskID, only reload the client state if they have
// that Task active.
if ((!task_id) || (task_id && client->IsTaskActive(task_id))) {
Log(Logs::General, Logs::Tasks, "[CLIENTLOAD] Reloading Task State For Client %s", client->GetName());
client->RemoveClientTaskState();
client->LoadClientTaskState();
task_manager->SendActiveTasksToClient(client);
}
}
++it;
}
}
bool EntityList::IsMobInZone(Mob *who)
{
//We don't use mob_list.find(who) because this code needs to be able to handle dangling pointers for the quest code.
auto it = mob_list.begin();
while(it != mob_list.end()) {
if(it->second == who) {
return true;
}
++it;
}
auto enc_it = encounter_list.begin();
while (enc_it != encounter_list.end()) {
if (enc_it->second == who) {
return true;
}
++enc_it;
}
return false;
}
/*
	Code to limit the number of certain NPCs in a given zone.
	Primarily used to make a named mob unique within the zone, but written
	to be more generic, allowing limits larger than 1.
	Maintain this stuff in a separate list since the number
of limited NPCs will most likely be much smaller than the number
of NPCs in the entire zone.
*/
void EntityList::LimitAddNPC(NPC *npc)
{
if (!npc)
return;
SpawnLimitRecord r;
uint16 eid = npc->GetID();
r.spawngroup_id = npc->GetSpawnGroupId();
r.npc_type = npc->GetNPCTypeID();
npc_limit_list[eid] = r;
}
void EntityList::LimitRemoveNPC(NPC *npc)
{
if (!npc)
return;
uint16 eid = npc->GetID();
npc_limit_list.erase(eid);
}
//check a limit over the entire zone.
//returns true if the limit has not been reached
bool EntityList::LimitCheckType(uint32 npc_type, int count)
{
if (count < 1)
return true;
std::map<uint16, SpawnLimitRecord>::iterator cur,end;
cur = npc_limit_list.begin();
end = npc_limit_list.end();
for (; cur != end; ++cur) {
if (cur->second.npc_type == npc_type) {
count--;
if (count == 0) {
return false;
}
}
}
return true;
}
//check limits on an npc type in a given spawn group.
//returns true if the limit has not been reached
bool EntityList::LimitCheckGroup(uint32 spawngroup_id, int count)
{
if (count < 1)
return true;
std::map<uint16, SpawnLimitRecord>::iterator cur,end;
cur = npc_limit_list.begin();
end = npc_limit_list.end();
for (; cur != end; ++cur) {
if (cur->second.spawngroup_id == spawngroup_id) {
count--;
if (count == 0) {
return false;
}
}
}
return true;
}
//check limits on an npc type in a given spawn group, and
//checks limits on the entire zone in one pass.
//returns true if neither limit has been reached
bool EntityList::LimitCheckBoth(uint32 npc_type, uint32 spawngroup_id, int group_count, int type_count)
{
if (group_count < 1 && type_count < 1)
return true;
std::map<uint16, SpawnLimitRecord>::iterator cur,end;
cur = npc_limit_list.begin();
end = npc_limit_list.end();
for (; cur != end; ++cur) {
if (cur->second.npc_type == npc_type) {
type_count--;
if (type_count == 0) {
return false;
}
}
if (cur->second.spawngroup_id == spawngroup_id) {
group_count--;
if (group_count == 0) {
return false;
}
}
}
return true;
}
bool EntityList::LimitCheckName(const char *npc_name)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC* npc = it->second;
if (npc)
if (strcasecmp(npc_name, npc->GetRawNPCTypeName()) == 0)
return false;
++it;
}
return true;
}
void EntityList::UpdateHoTT(Mob *target)
{
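	// refresh Health of Target's Target (HoTT) for every client currently targeting this mob, and update their TargetsTarget XTarget slot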
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
if (c->GetTarget() == target) {
if (target->GetTarget())
c->SetHoTT(target->GetTarget()->GetID());
else
c->SetHoTT(0);
c->UpdateXTargetType(TargetsTarget, target->GetTarget());
}
++it;
}
}
void EntityList::DestroyTempPets(Mob *owner)
{
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC* n = it->second;
if (n->GetSwarmInfo()) {
if (n->GetSwarmInfo()->owner_id == owner->GetID()) {
n->Depop();
}
}
++it;
}
}
int16 EntityList::CountTempPets(Mob *owner)
{
int16 count = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC* n = it->second;
if (n->GetSwarmInfo()) {
if (n->GetSwarmInfo()->owner_id == owner->GetID()) {
count++;
}
}
++it;
}
owner->SetTempPetCount(count);
return count;
}
void EntityList::AddTempPetsToHateList(Mob *owner, Mob* other, bool bFrenzy)
{
if (!other || !owner)
return;
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC* n = it->second;
if (n->GetSwarmInfo()) {
if (n->GetSwarmInfo()->owner_id == owner->GetID()) {
if (
!n->GetSpecialAbility(IMMUNE_AGGRO) &&
!(n->GetSpecialAbility(IMMUNE_AGGRO_CLIENT) && other->IsClient()) &&
!(n->GetSpecialAbility(IMMUNE_AGGRO_NPC) && other->IsNPC())
) {
n->hate_list.AddEntToHateList(other, 0, 0, bFrenzy);
}
}
}
++it;
}
}
bool Entity::CheckCoordLosNoZLeaps(float cur_x, float cur_y, float cur_z,
float trg_x, float trg_y, float trg_z, float perwalk)
{
if (zone->zonemap == nullptr)
return true;
glm::vec3 myloc;
glm::vec3 oloc;
glm::vec3 hit;
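	// both endpoints are raised 5 units above the supplied z values before the line-of-sight test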
myloc.x = cur_x;
myloc.y = cur_y;
myloc.z = cur_z+5;
oloc.x = trg_x;
oloc.y = trg_y;
oloc.z = trg_z+5;
if (myloc.x == oloc.x && myloc.y == oloc.y && myloc.z == oloc.z)
return true;
if (!zone->zonemap->LineIntersectsZoneNoZLeaps(myloc,oloc,perwalk,&hit))
return true;
return false;
}
void EntityList::QuestJournalledSayClose(Mob *sender, float dist, const char *mobname, const char *message,
Journal::Options &opts)
{
SerializeBuffer buf(sizeof(SpecialMesgHeader_Struct) + 12 + 64 + 64);
buf.WriteInt8(static_cast<int8>(opts.speak_mode));
buf.WriteInt8(static_cast<int8>(opts.journal_mode));
buf.WriteInt8(opts.language);
buf.WriteInt32(opts.message_type);
buf.WriteInt32(opts.target_spawn_id);
buf.WriteString(mobname);
buf.WriteInt32(0); // location, client doesn't seem to do anything with this
buf.WriteInt32(0);
buf.WriteInt32(0);
buf.WriteString(message);
auto outapp = new EQApplicationPacket(OP_SpecialMesg, buf);
// client only bothers logging if target spawn ID matches, safe to send to everyone
QueueCloseClients(sender, outapp, false, dist);
delete outapp;
}
Corpse *EntityList::GetClosestCorpse(Mob *sender, const char *Name)
{
if (!sender)
return nullptr;
uint32 CurrentDistance, ClosestDistance = 4294967295u;
Corpse *CurrentCorpse, *ClosestCorpse = nullptr;
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
CurrentCorpse = it->second;
++it;
if (Name && strcasecmp(CurrentCorpse->GetOwnerName(), Name))
continue;
CurrentDistance = ((CurrentCorpse->GetY() - sender->GetY()) * (CurrentCorpse->GetY() - sender->GetY())) +
((CurrentCorpse->GetX() - sender->GetX()) * (CurrentCorpse->GetX() - sender->GetX()));
if (CurrentDistance < ClosestDistance) {
ClosestDistance = CurrentDistance;
ClosestCorpse = CurrentCorpse;
}
}
return ClosestCorpse;
}
void EntityList::ForceGroupUpdate(uint32 gid)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second){
Group *g = nullptr;
g = it->second->GetGroup();
if (g) {
if (g->GetID() == gid) {
database.RefreshGroupFromDB(it->second);
}
}
}
++it;
}
}
void EntityList::SendGroupLeave(uint32 gid, const char *name)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
if (c) {
Group *g = nullptr;
g = c->GetGroup();
if (g) {
if (g->GetID() == gid) {
auto outapp = new EQApplicationPacket(OP_GroupUpdate, sizeof(GroupJoin_Struct));
GroupJoin_Struct* gj = (GroupJoin_Struct*) outapp->pBuffer;
strcpy(gj->membername, name);
gj->action = groupActLeave;
strcpy(gj->yourname, c->GetName());
Mob *Leader = g->GetLeader();
if (Leader)
Leader->CastToClient()->GetGroupAAs(&gj->leader_aas);
c->QueuePacket(outapp);
safe_delete(outapp);
g->DelMemberOOZ(name);
if (g->IsLeader(c) && c->IsLFP())
c->UpdateLFP();
}
}
}
++it;
}
}
void EntityList::SendGroupJoin(uint32 gid, const char *name)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second){
Group *g = nullptr;
g = it->second->GetGroup();
if (g) {
if (g->GetID() == gid) {
auto outapp = new EQApplicationPacket(OP_GroupUpdate, sizeof(GroupJoin_Struct));
GroupJoin_Struct* gj = (GroupJoin_Struct*) outapp->pBuffer;
strcpy(gj->membername, name);
gj->action = groupActJoin;
strcpy(gj->yourname, it->second->GetName());
Mob *Leader = g->GetLeader();
if (Leader)
Leader->CastToClient()->GetGroupAAs(&gj->leader_aas);
it->second->QueuePacket(outapp);
safe_delete(outapp);
}
}
}
++it;
}
}
void EntityList::GroupMessage(uint32 gid, const char *from, const char *message)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second) {
Group *g = nullptr;
g = it->second->GetGroup();
if (g) {
if (g->GetID() == gid)
it->second->ChannelMessageSend(from, it->second->GetName(), ChatChannel_Group, 0, 100, message);
}
}
++it;
}
}
uint16 EntityList::CreateGroundObject(uint32 itemid, const glm::vec4& position, uint32 decay_time)
{
const EQ::ItemData *is = database.GetItem(itemid);
if (!is)
return 0;
auto i = new EQ::ItemInstance(is, is->MaxCharges);
if (!i)
return 0;
auto object = new Object(i, position.x, position.y, position.z, position.w, decay_time);
entity_list.AddObject(object, true);
safe_delete(i);
if (!object)
return 0;
return object->GetID();
}
uint16 EntityList::CreateGroundObjectFromModel(const char *model, const glm::vec4& position, uint8 type, uint32 decay_time)
{
if (!model)
return 0;
auto object = new Object(model, position.x, position.y, position.z, position.w, type);
entity_list.AddObject(object, true);
if (!object)
return 0;
return object->GetID();
}
uint16 EntityList::CreateDoor(const char *model, const glm::vec4& position, uint8 opentype, uint16 size)
{
if (!model)
return 0; // fell through everything, this is bad/incomplete from perl
auto door = new Doors(model, position, opentype, size);
RemoveAllDoors();
zone->LoadZoneDoors(zone->GetShortName(), zone->GetInstanceVersion());
entity_list.AddDoor(door);
entity_list.RespawnAllDoors();
if (door)
return door->GetEntityID();
return 0; // fell through everything, this is bad/incomplete from perl
}
Mob *EntityList::GetTargetForMez(Mob *caster)
{
if (!caster)
return nullptr;
auto it = mob_list.begin();
//TODO: make this smarter and not mez targets being damaged by dots
while (it != mob_list.end()) {
Mob *d = it->second;
if (d) {
if (d == caster) { //caster can't pick himself
++it;
continue;
}
if (caster->GetTarget() == d) { //caster can't pick his target
++it;
continue;
}
if (!caster->CheckAggro(d)) { //caster can't pick targets that aren't aggroed on himself
++it;
continue;
}
			if (DistanceSquared(caster->GetPosition(), d->GetPosition()) > 22250) { //only pick targets within roughly 150 units (squared distance check)
++it;
continue;
}
			if (!caster->CheckLosFN(d)) { //this is wasteful but can't really think of another way to do it
				++it; //that won't have us trying to LOS the same target every time
				continue; //it's only in combat so its impact should be minimal... but still.
}
return d;
}
++it;
}
return nullptr;
}
void EntityList::SendZoneAppearance(Client *c)
{
if (!c) {
return;
}
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *cur = it->second;
if (cur) {
if (cur == c) {
++it;
continue;
}
if (cur->GetAppearance() != eaStanding) {
cur->SendAppearancePacket(AT_Anim, cur->GetAppearanceValue(cur->GetAppearance()), false, true, c);
}
if (cur->GetSize() != cur->GetBaseSize()) {
cur->SendAppearancePacket(AT_Size, (uint32) cur->GetSize(), false, true, c);
}
}
++it;
}
}
void EntityList::SendNimbusEffects(Client *c)
{
if (!c)
return;
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *cur = it->second;
if (cur) {
if (cur == c) {
++it;
continue;
}
if (cur->GetNimbusEffect1() != 0) {
cur->SendSpellEffect(cur->GetNimbusEffect1(), 1000, 0, 1, 3000, false, c);
}
if (cur->GetNimbusEffect2() != 0) {
cur->SendSpellEffect(cur->GetNimbusEffect2(), 2000, 0, 1, 3000, false, c);
}
if (cur->GetNimbusEffect3() != 0) {
cur->SendSpellEffect(cur->GetNimbusEffect3(), 3000, 0, 1, 3000, false, c);
}
}
++it;
}
}
void EntityList::SendUntargetable(Client *c)
{
if (!c)
return;
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *cur = it->second;
if (cur) {
if (cur == c) {
++it;
continue;
}
if (!cur->IsTargetable())
cur->SendTargetable(false, c);
}
++it;
}
}
void EntityList::ZoneWho(Client *c, Who_All_Struct *Who)
{
// This is only called for SoF clients, as regular /who is now handled server-side for that client.
uint32 PacketLength = 0;
uint32 Entries = 0;
uint8 WhomLength = strlen(Who->whom);
std::list<Client *> client_sub_list;
auto it = client_list.begin();
while (it != client_list.end()) {
Client *ClientEntry = it->second;
++it;
if (ClientEntry) {
if (ClientEntry->GMHideMe(c))
continue;
if ((Who->wrace != 0xFFFFFFFF) && (ClientEntry->GetRace() != Who->wrace))
continue;
if ((Who->wclass != 0xFFFFFFFF) && (ClientEntry->GetClass() != Who->wclass))
continue;
if ((Who->lvllow != 0xFFFFFFFF) && (ClientEntry->GetLevel() < Who->lvllow))
continue;
if ((Who->lvlhigh != 0xFFFFFFFF) && (ClientEntry->GetLevel() > Who->lvlhigh))
continue;
if (Who->guildid != 0xFFFFFFFF) {
if ((Who->guildid == 0xFFFFFFFC) && !ClientEntry->IsTrader())
continue;
if ((Who->guildid == 0xFFFFFFFB) && !ClientEntry->IsBuyer())
continue;
if (Who->guildid != ClientEntry->GuildID())
continue;
}
if (WhomLength && strncasecmp(Who->whom, ClientEntry->GetName(), WhomLength) &&
strncasecmp(guild_mgr.GetGuildName(ClientEntry->GuildID()), Who->whom, WhomLength))
continue;
Entries++;
client_sub_list.push_back(ClientEntry);
PacketLength = PacketLength + strlen(ClientEntry->GetName());
if (strlen(guild_mgr.GetGuildName(ClientEntry->GuildID())) > 0)
PacketLength = PacketLength + strlen(guild_mgr.GetGuildName(ClientEntry->GuildID())) + 2;
}
}
PacketLength = PacketLength + sizeof(WhoAllReturnStruct) + (47 * Entries);
auto outapp = new EQApplicationPacket(OP_WhoAllResponse, PacketLength);
char *Buffer = (char *)outapp->pBuffer;
WhoAllReturnStruct *WARS = (WhoAllReturnStruct *)Buffer;
WARS->id = 0;
WARS->playerineqstring = 5001;
strncpy(WARS->line, "---------------------------", sizeof(WARS->line));
WARS->unknown35 = 0x0a;
WARS->unknown36 = 0;
switch(Entries) {
case 0:
WARS->playersinzonestring = 5029;
break;
case 1:
WARS->playersinzonestring = 5028; // 5028 There is %1 player in EverQuest.
break;
default:
WARS->playersinzonestring = 5036; // 5036 There are %1 players in EverQuest.
}
WARS->unknown44[0] = 0;
WARS->unknown44[1] = 0;
WARS->unknown52 = Entries;
WARS->unknown56 = Entries;
WARS->playercount = Entries;
Buffer += sizeof(WhoAllReturnStruct);
auto sit = client_sub_list.begin();
while (sit != client_sub_list.end()) {
Client *ClientEntry = *sit;
++sit;
if (ClientEntry) {
if (ClientEntry->GMHideMe(c))
continue;
if ((Who->wrace != 0xFFFFFFFF) && (ClientEntry->GetRace() != Who->wrace))
continue;
if ((Who->wclass != 0xFFFFFFFF) && (ClientEntry->GetClass() != Who->wclass))
continue;
if ((Who->lvllow != 0xFFFFFFFF) && (ClientEntry->GetLevel() < Who->lvllow))
continue;
if ((Who->lvlhigh != 0xFFFFFFFF) && (ClientEntry->GetLevel() > Who->lvlhigh))
continue;
if (Who->guildid != 0xFFFFFFFF) {
if ((Who->guildid == 0xFFFFFFFC) && !ClientEntry->IsTrader())
continue;
if ((Who->guildid == 0xFFFFFFFB) && !ClientEntry->IsBuyer())
continue;
if (Who->guildid != ClientEntry->GuildID())
continue;
}
if (WhomLength && strncasecmp(Who->whom, ClientEntry->GetName(), WhomLength) &&
strncasecmp(guild_mgr.GetGuildName(ClientEntry->GuildID()), Who->whom, WhomLength))
continue;
std::string GuildName;
if ((ClientEntry->GuildID() != GUILD_NONE) && (ClientEntry->GuildID() > 0)) {
GuildName = "<";
GuildName += guild_mgr.GetGuildName(ClientEntry->GuildID());
GuildName += ">";
}
uint32 FormatMSGID = 5025; // 5025 %T1[%2 %3] %4 (%5) %6 %7 %8 %9
if (ClientEntry->GetAnon() == 1)
FormatMSGID = 5024; // 5024 %T1[ANONYMOUS] %2 %3
else if (ClientEntry->GetAnon() == 2)
FormatMSGID = 5023; // 5023 %T1[ANONYMOUS] %2 %3 %4
uint32 PlayerClass = 0;
uint32 PlayerLevel = 0;
uint32 PlayerRace = 0;
uint32 ZoneMSGID = 0xFFFFFFFF;
if (ClientEntry->GetAnon()==0) {
PlayerClass = ClientEntry->GetClass();
PlayerLevel = ClientEntry->GetLevel();
PlayerRace = ClientEntry->GetRace();
}
WhoAllPlayerPart1* WAPP1 = (WhoAllPlayerPart1*)Buffer;
WAPP1->FormatMSGID = FormatMSGID;
WAPP1->PIDMSGID = 0xFFFFFFFF;
strcpy(WAPP1->Name, ClientEntry->GetName());
Buffer += sizeof(WhoAllPlayerPart1) + strlen(WAPP1->Name);
WhoAllPlayerPart2* WAPP2 = (WhoAllPlayerPart2*)Buffer;
if (ClientEntry->IsTrader())
WAPP2->RankMSGID = 12315;
else if (ClientEntry->IsBuyer())
WAPP2->RankMSGID = 6056;
else if (ClientEntry->Admin() >= 10 && ClientEntry->GetGM())
WAPP2->RankMSGID = 12312;
else
WAPP2->RankMSGID = 0xFFFFFFFF;
strcpy(WAPP2->Guild, GuildName.c_str());
Buffer += sizeof(WhoAllPlayerPart2) + strlen(WAPP2->Guild);
WhoAllPlayerPart3* WAPP3 = (WhoAllPlayerPart3*)Buffer;
WAPP3->Unknown80[0] = 0xFFFFFFFF;
if (ClientEntry->IsLD())
WAPP3->Unknown80[1] = 12313; // LinkDead
else
WAPP3->Unknown80[1] = 0xFFFFFFFF;
WAPP3->ZoneMSGID = ZoneMSGID;
WAPP3->Zone = 0;
WAPP3->Class_ = PlayerClass;
WAPP3->Level = PlayerLevel;
WAPP3->Race = PlayerRace;
WAPP3->Account[0] = 0;
Buffer += sizeof(WhoAllPlayerPart3);
WhoAllPlayerPart4* WAPP4 = (WhoAllPlayerPart4*)Buffer;
WAPP4->Unknown100 = 0;
Buffer += sizeof(WhoAllPlayerPart4);
}
}
c->QueuePacket(outapp);
safe_delete(outapp);
}
void EntityList::UnMarkNPC(uint16 ID)
{
	// Designed to be called from the Mob destructor, this method calls Group::UnMarkNPC for
	// each group to remove the dead mob's entity ID from the group's list of NPCs marked via the
	// Group Leadership AA Mark NPC ability.
	//
auto it = group_list.begin();
while (it != group_list.end()) {
if (*it)
(*it)->UnMarkNPC(ID);
++it;
}
}
uint32 EntityList::CheckNPCsClose(Mob *center)
{
uint32 count = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
NPC *cur = it->second;
if (!cur || cur == center || cur->IsPet() || cur->GetClass() == LDON_TREASURE ||
cur->GetBodyType() == BT_NoTarget || cur->GetBodyType() == BT_Special) {
++it;
continue;
}
float xDiff = cur->GetX() - center->GetX();
float yDiff = cur->GetY() - center->GetY();
float zDiff = cur->GetZ() - center->GetZ();
float dist = ((xDiff * xDiff) + (yDiff * yDiff) + (zDiff * zDiff));
if (dist <= RuleR(Adventure, DistanceForRescueAccept))
count++;
++it;
}
return count;
}
void EntityList::GateAllClients()
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
if (c)
c->GoToBind();
++it;
}
}
void EntityList::SignalAllClients(uint32 data)
{
auto it = client_list.begin();
while (it != client_list.end()) {
Client *ent = it->second;
if (ent)
ent->Signal(data);
++it;
}
}
uint16 EntityList::GetClientCount()
{
uint16 ClientCount = 0;
std::list<Client*> client_list;
entity_list.GetClientList(client_list);
auto iter = client_list.begin();
while (iter != client_list.end()) {
Client *entry = (*iter);
entry->GetCleanName();
ClientCount++;
iter++;
}
return ClientCount;
}
void EntityList::GetMobList(std::list<Mob *> &m_list)
{
m_list.clear();
auto it = mob_list.begin();
while (it != mob_list.end()) {
m_list.push_back(it->second);
++it;
}
}
void EntityList::GetNPCList(std::list<NPC *> &n_list)
{
n_list.clear();
auto it = npc_list.begin();
while (it != npc_list.end()) {
n_list.push_back(it->second);
++it;
}
}
void EntityList::GetClientList(std::list<Client *> &c_list)
{
c_list.clear();
auto it = client_list.begin();
while (it != client_list.end()) {
c_list.push_back(it->second);
++it;
}
}
#ifdef BOTS
void EntityList::GetBotList(std::list<Bot *> &b_list)
{
b_list.clear();
for (auto bot_iterator : bot_list) {
b_list.push_back(bot_iterator);
}
}
#endif
void EntityList::GetCorpseList(std::list<Corpse *> &c_list)
{
c_list.clear();
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
c_list.push_back(it->second);
++it;
}
}
void EntityList::GetObjectList(std::list<Object *> &o_list)
{
o_list.clear();
auto it = object_list.begin();
while (it != object_list.end()) {
o_list.push_back(it->second);
++it;
}
}
void EntityList::GetDoorsList(std::list<Doors*> &o_list)
{
o_list.clear();
auto it = door_list.begin();
while (it != door_list.end()) {
o_list.push_back(it->second);
++it;
}
}
void EntityList::GetSpawnList(std::list<Spawn2*> &o_list)
{
o_list.clear();
if(zone) {
LinkedListIterator<Spawn2*> iterator(zone->spawn2_list);
iterator.Reset();
while(iterator.MoreElements())
{
Spawn2 *ent = iterator.GetData();
o_list.push_back(ent);
iterator.Advance();
}
}
}
void EntityList::UpdateQGlobal(uint32 qid, QGlobal newGlobal)
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *ent = it->second;
if (ent->IsClient()) {
QGlobalCache *qgc = ent->CastToClient()->GetQGlobals();
if (qgc) {
uint32 char_id = ent->CastToClient()->CharacterID();
if (newGlobal.char_id == char_id && newGlobal.npc_id == 0)
qgc->AddGlobal(qid, newGlobal);
}
} else if (ent->IsNPC()) {
QGlobalCache *qgc = ent->CastToNPC()->GetQGlobals();
if (qgc) {
uint32 npc_id = ent->GetNPCTypeID();
if (newGlobal.npc_id == npc_id)
qgc->AddGlobal(qid, newGlobal);
}
}
++it;
}
}
void EntityList::DeleteQGlobal(std::string name, uint32 npcID, uint32 charID, uint32 zoneID)
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *ent = it->second;
if (ent->IsClient()) {
QGlobalCache *qgc = ent->CastToClient()->GetQGlobals();
if (qgc)
qgc->RemoveGlobal(name, npcID, charID, zoneID);
} else if (ent->IsNPC()) {
QGlobalCache *qgc = ent->CastToNPC()->GetQGlobals();
if (qgc)
qgc->RemoveGlobal(name, npcID, charID, zoneID);
}
++it;
}
}
void EntityList::SendFindableNPCList(Client *c)
{
if (!c)
return;
auto outapp = new EQApplicationPacket(OP_SendFindableNPCs, sizeof(FindableNPC_Struct));
FindableNPC_Struct *fnpcs = (FindableNPC_Struct *)outapp->pBuffer;
fnpcs->Unknown109 = 0x16;
fnpcs->Unknown110 = 0x06;
fnpcs->Unknown111 = 0x24;
fnpcs->Action = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second) {
NPC *n = it->second;
if (n->IsFindable()) {
fnpcs->EntityID = n->GetID();
strn0cpy(fnpcs->Name, n->GetCleanName(), sizeof(fnpcs->Name));
strn0cpy(fnpcs->LastName, n->GetLastName(), sizeof(fnpcs->LastName));
fnpcs->Race = n->GetRace();
fnpcs->Class = n->GetClass();
c->QueuePacket(outapp);
}
}
++it;
}
safe_delete(outapp);
}
void EntityList::UpdateFindableNPCState(NPC *n, bool Remove)
{
if (!n || !n->IsFindable())
return;
auto outapp = new EQApplicationPacket(OP_SendFindableNPCs, sizeof(FindableNPC_Struct));
FindableNPC_Struct *fnpcs = (FindableNPC_Struct *)outapp->pBuffer;
fnpcs->Unknown109 = 0x16;
fnpcs->Unknown110 = 0x06;
fnpcs->Unknown111 = 0x24;
fnpcs->Action = Remove ? 1: 0;
fnpcs->EntityID = n->GetID();
strn0cpy(fnpcs->Name, n->GetCleanName(), sizeof(fnpcs->Name));
strn0cpy(fnpcs->LastName, n->GetLastName(), sizeof(fnpcs->LastName));
fnpcs->Race = n->GetRace();
fnpcs->Class = n->GetClass();
auto it = client_list.begin();
while (it != client_list.end()) {
Client *c = it->second;
if (c && (c->ClientVersion() >= EQ::versions::ClientVersion::SoD))
c->QueuePacket(outapp);
++it;
}
safe_delete(outapp);
}
void EntityList::HideCorpses(Client *c, uint8 CurrentMode, uint8 NewMode)
{
if (!c)
return;
if (NewMode == HideCorpseNone) {
SendZoneCorpses(c);
return;
}
Group *g = nullptr;
if (NewMode == HideCorpseAllButGroup) {
g = c->GetGroup();
if (!g)
NewMode = HideCorpseAll;
}
auto it = corpse_list.begin();
while (it != corpse_list.end()) {
Corpse *b = it->second;
if (b && (b->GetCharID() != c->CharacterID())) {
if ((NewMode == HideCorpseAll) || ((NewMode == HideCorpseNPC) && (b->IsNPCCorpse()))) {
EQApplicationPacket outapp;
b->CreateDespawnPacket(&outapp, false);
c->QueuePacket(&outapp);
} else if(NewMode == HideCorpseAllButGroup) {
if (!g->IsGroupMember(b->GetOwnerName())) {
EQApplicationPacket outapp;
b->CreateDespawnPacket(&outapp, false);
c->QueuePacket(&outapp);
} else if((CurrentMode == HideCorpseAll)) {
EQApplicationPacket outapp;
b->CreateSpawnPacket(&outapp);
c->QueuePacket(&outapp);
}
}
}
++it;
}
}
void EntityList::AddLootToNPCS(uint32 item_id, uint32 count)
{
if (count == 0)
return;
int npc_count = 0;
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (!it->second->IsPet()
&& it->second->GetClass() != LDON_TREASURE
&& it->second->GetBodyType() != BT_NoTarget
&& it->second->GetBodyType() != BT_NoTarget2
&& it->second->GetBodyType() != BT_Special)
npc_count++;
++it;
}
if (npc_count == 0)
return;
auto npcs = new NPC *[npc_count];
auto counts = new int[npc_count];
auto marked = new bool[npc_count];
memset(counts, 0, sizeof(int) * npc_count);
memset(marked, 0, sizeof(bool) * npc_count);
int i = 0;
it = npc_list.begin();
while (it != npc_list.end()) {
if (!it->second->IsPet()
&& it->second->GetClass() != LDON_TREASURE
&& it->second->GetBodyType() != BT_NoTarget
&& it->second->GetBodyType() != BT_NoTarget2
&& it->second->GetBodyType() != BT_Special)
npcs[i++] = it->second;
++it;
}
while (count > 0) {
std::vector<int> selection;
selection.reserve(npc_count);
for (int j = 0; j < npc_count; ++j)
selection.push_back(j);
while (!selection.empty() && count > 0) {
int k = zone->random.Int(0, selection.size() - 1);
counts[selection[k]]++;
count--;
selection.erase(selection.begin() + k);
}
}
for (int j = 0; j < npc_count; ++j)
if (counts[j] > 0)
for (int k = 0; k < counts[j]; ++k)
npcs[j]->AddItem(item_id, 1);
safe_delete_array(npcs);
safe_delete_array(counts);
safe_delete_array(marked);
}
void EntityList::CameraEffect(uint32 duration, uint32 intensity)
{
auto outapp = new EQApplicationPacket(OP_CameraEffect, sizeof(Camera_Struct));
Camera_Struct* cs = (Camera_Struct*) outapp->pBuffer;
cs->duration = duration; // Duration in milliseconds
cs->intensity = ((intensity * 6710886) + 1023410176); // Intensity ranges from 1023410176 to 1090519040, so simplify it from 0 to 10.
entity_list.QueueClients(0, outapp);
safe_delete(outapp);
}
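// A minimal sketch of the 0-10 -> packet-intensity mapping used in CameraEffect()
// above; the helper name is illustrative only and simply restates the formula.
// With these constants, intensity 0 yields 1023410176 and intensity 10 yields
// 1090519036, i.e. the 1023410176..1090519040 range noted in the comment above.
static inline uint32 CameraEffectIntensityValue(uint32 intensity)
{
	return (intensity * 6710886) + 1023410176;
}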
NPC *EntityList::GetClosestBanker(Mob *sender, uint32 &distance)
{
if (!sender)
return nullptr;
distance = 4294967295u;
NPC *nc = nullptr;
auto it = npc_list.begin();
while (it != npc_list.end()) {
if (it->second->GetClass() == BANKER) {
uint32 nd = ((it->second->GetY() - sender->GetY()) * (it->second->GetY() - sender->GetY())) +
((it->second->GetX() - sender->GetX()) * (it->second->GetX() - sender->GetX()));
if (nd < distance){
distance = nd;
nc = it->second;
}
}
++it;
}
return nc;
}
void EntityList::ExpeditionWarning(uint32 minutes_left)
{
auto outapp = new EQApplicationPacket(OP_DzExpeditionEndsWarning, sizeof(ExpeditionExpireWarning));
ExpeditionExpireWarning *ew = (ExpeditionExpireWarning*)outapp->pBuffer;
ew->minutes_remaining = minutes_left;
auto it = client_list.begin();
while (it != client_list.end()) {
it->second->MessageString(Chat::Yellow, EXPEDITION_MIN_REMAIN, itoa((int)minutes_left));
it->second->QueuePacket(outapp);
++it;
}
safe_delete(outapp);
}
Mob *EntityList::GetClosestMobByBodyType(Mob *sender, bodyType BodyType)
{
if (!sender)
return nullptr;
uint32 CurrentDistance, ClosestDistance = 4294967295u;
Mob *CurrentMob, *ClosestMob = nullptr;
auto it = mob_list.begin();
while (it != mob_list.end()) {
CurrentMob = it->second;
++it;
if (CurrentMob->GetBodyType() != BodyType)
continue;
CurrentDistance = ((CurrentMob->GetY() - sender->GetY()) * (CurrentMob->GetY() - sender->GetY())) +
((CurrentMob->GetX() - sender->GetX()) * (CurrentMob->GetX() - sender->GetX()));
if (CurrentDistance < ClosestDistance) {
ClosestDistance = CurrentDistance;
ClosestMob = CurrentMob;
}
}
return ClosestMob;
}
void EntityList::GetTargetsForConeArea(Mob *start, float min_radius, float radius, float height, int pcnpc, std::list<Mob*> &m_list)
{
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *ptr = it->second;
if (ptr == start) {
++it;
continue;
}
// check PC/NPC only flag 1 = PCs, 2 = NPCs
if (pcnpc == 1 && !ptr->IsClient() && !ptr->IsMerc() && !ptr->IsBot()) {
++it;
continue;
} else if (pcnpc == 2 && (ptr->IsClient() || ptr->IsMerc() || ptr->IsBot())) {
++it;
continue;
}
float x_diff = ptr->GetX() - start->GetX();
float y_diff = ptr->GetY() - start->GetY();
float z_diff = ptr->GetZ() - start->GetZ();
x_diff *= x_diff;
y_diff *= y_diff;
z_diff *= z_diff;
if ((x_diff + y_diff) <= (radius * radius) && (x_diff + y_diff) >= (min_radius * min_radius))
if(z_diff <= (height * height))
m_list.push_back(ptr);
++it;
}
}
Client *EntityList::FindCorpseDragger(uint16 CorpseID)
{
auto it = client_list.begin();
while (it != client_list.end()) {
if (it->second->IsDraggingCorpse(CorpseID))
return it->second;
++it;
}
return nullptr;
}
Mob *EntityList::GetTargetForVirus(Mob *spreader, int range)
{
int max_spread_range = RuleI(Spells, VirusSpreadDistance);
if (range)
max_spread_range = range;
std::vector<Mob *> TargetsInRange;
auto it = mob_list.begin();
while (it != mob_list.end()) {
Mob *cur = it->second;
// Make sure the target is in range, has los and is not the mob doing the spreading
if ((cur->GetID() != spreader->GetID()) &&
(cur->CalculateDistance(spreader->GetX(), spreader->GetY(),
spreader->GetZ()) <= max_spread_range) &&
(spreader->CheckLosFN(cur))) {
// If the spreader is an npc it can only spread to other npc controlled mobs
if (spreader->IsNPC() && !spreader->IsPet() && !spreader->CastToNPC()->GetSwarmOwner() && cur->IsNPC()) {
TargetsInRange.push_back(cur);
}
// If the spreader is an npc controlled pet it can spread to any other npc or an npc controlled pet
else if (spreader->IsNPC() && spreader->IsPet() && spreader->GetOwner()->IsNPC()) {
if (cur->IsNPC() && !cur->IsPet()) {
TargetsInRange.push_back(cur);
} else if (cur->IsNPC() && cur->IsPet() && cur->GetOwner()->IsNPC()) {
TargetsInRange.push_back(cur);
}
else if (cur->IsNPC() && cur->CastToNPC()->GetSwarmOwner() && cur->GetOwner()->IsNPC()) {
TargetsInRange.push_back(cur);
}
}
			// if the spreader is anything else (bot, pet, etc.) then it should spread to everything but non-client-controlled npcs
else if (!spreader->IsNPC() && !cur->IsNPC()) {
TargetsInRange.push_back(cur);
}
			// if it's a pet we need to determine the appropriate targets (pet to client, pet to pet, pet to bot, etc.)
else if (spreader->IsNPC() && (spreader->IsPet() || spreader->CastToNPC()->GetSwarmOwner()) && !spreader->GetOwner()->IsNPC()) {
if (!cur->IsNPC()) {
TargetsInRange.push_back(cur);
}
else if (cur->IsNPC() && (cur->IsPet() || cur->CastToNPC()->GetSwarmOwner()) && !cur->GetOwner()->IsNPC()) {
TargetsInRange.push_back(cur);
}
}
}
++it;
}
if(TargetsInRange.empty())
return nullptr;
return TargetsInRange[zone->random.Int(0, TargetsInRange.size() - 1)];
}
void EntityList::StopMobAI()
{
for (auto &mob : mob_list) {
mob.second->AI_Stop();
mob.second->AI_ShutDown();
}
}
void EntityList::SendAlternateAdvancementStats() {
for(auto &c : client_list) {
c.second->SendAlternateAdvancementTable();
c.second->SendAlternateAdvancementStats();
c.second->SendAlternateAdvancementPoints();
}
}
void EntityList::ReloadMerchants() {
for (auto it = npc_list.begin();it != npc_list.end(); ++it) {
NPC *cur = it->second;
if (cur->MerchantType != 0) {
zone->LoadNewMerchantData(cur->MerchantType);
}
}
}
/**
* If we have a distance requested that is greater than our scanning distance
* then we return the full list
*
* See comments @EntityList::ScanCloseMobs for system explanation
*
* @param mob
* @param distance
* @return
*/
std::unordered_map<uint16, Mob *> &EntityList::GetCloseMobList(Mob *mob, float distance)
{
if (distance <= RuleI(Range, MobCloseScanDistance)) {
return mob->close_mobs;
}
return mob_list;
}
void EntityList::GateAllClientsToSafeReturn()
{
DynamicZone* dz = zone ? zone->GetDynamicZone() : nullptr;
for (const auto& client_list_iter : client_list)
{
if (client_list_iter.second)
{
// falls back to gating clients to bind if dz invalid
client_list_iter.second->GoToDzSafeReturnOrBind(dz);
}
}
}
int EntityList::MovePlayerCorpsesToGraveyard(bool force_move_from_instance)
{
if (!zone)
{
return 0;
}
int moved_count = 0;
for (auto it = corpse_list.begin(); it != corpse_list.end();)
{
bool moved = false;
if (it->second && it->second->IsPlayerCorpse())
{
if (zone->HasGraveyard())
{
moved = it->second->MovePlayerCorpseToGraveyard();
}
else if (force_move_from_instance && zone->GetInstanceID() != 0)
{
moved = it->second->MovePlayerCorpseToNonInstance();
}
}
if (moved)
{
safe_delete(it->second);
free_ids.push(it->first);
it = corpse_list.erase(it);
++moved_count;
}
else
{
++it;
}
}
return moved_count;
}
void EntityList::DespawnGridNodes(int32 grid_id) {
for (auto mob_iterator : mob_list) {
Mob *mob = mob_iterator.second;
if (mob->IsNPC() && mob->GetRace() == 2254 && mob->EntityVariableExists("grid_id") && atoi(mob->GetEntityVariable("grid_id")) == grid_id) {
mob->Depop();
}
}
}
| 1 | 10,768 | `c_str()` is not needed. | EQEmu-Server | cpp |
@@ -310,9 +310,12 @@ func installHandlers(c *ExtraConfig, s *genericapiserver.GenericAPIServer) {
})
}
+ if features.DefaultFeatureGate.Enabled(features.Egress) || features.DefaultFeatureGate.Enabled(features.ServiceExternalIP) {
+ s.Handler.NonGoRestfulMux.HandleFunc("/validate/externalippool", webhook.HandlerForValidateFunc(c.externalIPPoolController.ValidateExternalIPPool))
+ }
+
if features.DefaultFeatureGate.Enabled(features.Egress) {
s.Handler.NonGoRestfulMux.HandleFunc("/validate/egress", webhook.HandlerForValidateFunc(c.egressController.ValidateEgress))
- s.Handler.NonGoRestfulMux.HandleFunc("/validate/externalippool", webhook.HandlerForValidateFunc(c.externalIPPoolController.ValidateExternalIPPool))
}
if features.DefaultFeatureGate.Enabled(features.AntreaIPAM) { | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apiserver
import (
"context"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"antrea.io/antrea/pkg/apis/controlplane"
cpinstall "antrea.io/antrea/pkg/apis/controlplane/install"
apistats "antrea.io/antrea/pkg/apis/stats"
statsinstall "antrea.io/antrea/pkg/apis/stats/install"
systeminstall "antrea.io/antrea/pkg/apis/system/install"
system "antrea.io/antrea/pkg/apis/system/v1beta1"
"antrea.io/antrea/pkg/apiserver/certificate"
"antrea.io/antrea/pkg/apiserver/handlers/endpoint"
"antrea.io/antrea/pkg/apiserver/handlers/featuregates"
"antrea.io/antrea/pkg/apiserver/handlers/loglevel"
"antrea.io/antrea/pkg/apiserver/handlers/webhook"
"antrea.io/antrea/pkg/apiserver/registry/controlplane/egressgroup"
"antrea.io/antrea/pkg/apiserver/registry/controlplane/nodestatssummary"
"antrea.io/antrea/pkg/apiserver/registry/networkpolicy/addressgroup"
"antrea.io/antrea/pkg/apiserver/registry/networkpolicy/appliedtogroup"
"antrea.io/antrea/pkg/apiserver/registry/networkpolicy/clustergroupmember"
"antrea.io/antrea/pkg/apiserver/registry/networkpolicy/groupassociation"
"antrea.io/antrea/pkg/apiserver/registry/networkpolicy/networkpolicy"
"antrea.io/antrea/pkg/apiserver/registry/stats/antreaclusternetworkpolicystats"
"antrea.io/antrea/pkg/apiserver/registry/stats/antreanetworkpolicystats"
"antrea.io/antrea/pkg/apiserver/registry/stats/networkpolicystats"
"antrea.io/antrea/pkg/apiserver/registry/system/controllerinfo"
"antrea.io/antrea/pkg/apiserver/registry/system/supportbundle"
"antrea.io/antrea/pkg/apiserver/storage"
"antrea.io/antrea/pkg/controller/egress"
"antrea.io/antrea/pkg/controller/externalippool"
"antrea.io/antrea/pkg/controller/ipam"
controllernetworkpolicy "antrea.io/antrea/pkg/controller/networkpolicy"
"antrea.io/antrea/pkg/controller/querier"
"antrea.io/antrea/pkg/controller/stats"
"antrea.io/antrea/pkg/features"
legacycontrolplane "antrea.io/antrea/pkg/legacyapis/controlplane"
legacycpinstall "antrea.io/antrea/pkg/legacyapis/controlplane/install"
legacyapistats "antrea.io/antrea/pkg/legacyapis/stats"
legacystatsinstall "antrea.io/antrea/pkg/legacyapis/stats/install"
legacysysteminstall "antrea.io/antrea/pkg/legacyapis/system/install"
legacysystem "antrea.io/antrea/pkg/legacyapis/system/v1beta1"
)
var (
// Scheme defines methods for serializing and deserializing API objects.
Scheme = runtime.NewScheme()
// Codecs provides methods for retrieving codecs and serializers for specific
// versions and content types.
Codecs = serializer.NewCodecFactory(Scheme)
// #nosec G101: false positive triggered by variable name which includes "token"
TokenPath = "/var/run/antrea/apiserver/loopback-client-token"
)
func init() {
cpinstall.Install(Scheme)
systeminstall.Install(Scheme)
statsinstall.Install(Scheme)
legacycpinstall.Install(Scheme)
legacysysteminstall.Install(Scheme)
legacystatsinstall.Install(Scheme)
// We need to add the options to empty v1, see sample-apiserver/pkg/apiserver/apiserver.go.
metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
}
// ExtraConfig holds custom apiserver config.
type ExtraConfig struct {
k8sClient kubernetes.Interface
addressGroupStore storage.Interface
appliedToGroupStore storage.Interface
networkPolicyStore storage.Interface
egressGroupStore storage.Interface
controllerQuerier querier.ControllerQuerier
endpointQuerier controllernetworkpolicy.EndpointQuerier
networkPolicyController *controllernetworkpolicy.NetworkPolicyController
egressController *egress.EgressController
externalIPPoolController *externalippool.ExternalIPPoolController
caCertController *certificate.CACertController
statsAggregator *stats.Aggregator
networkPolicyStatusController *controllernetworkpolicy.StatusController
}
// Config defines the config for Antrea apiserver.
type Config struct {
genericConfig *genericapiserver.Config
extraConfig ExtraConfig
}
// APIServer contains state for a Kubernetes cluster apiserver.
type APIServer struct {
GenericAPIServer *genericapiserver.GenericAPIServer
caCertController *certificate.CACertController
}
func (s *APIServer) Run(stopCh <-chan struct{}) error {
// Make sure CACertController runs once to publish the CA cert before starting APIServer.
if err := s.caCertController.RunOnce(); err != nil {
klog.Warningf("caCertController RunOnce failed: %v", err)
}
go s.caCertController.Run(1, stopCh)
return s.GenericAPIServer.PrepareRun().Run(stopCh)
}
type completedConfig struct {
genericConfig genericapiserver.CompletedConfig
extraConfig *ExtraConfig
}
func NewConfig(
genericConfig *genericapiserver.Config,
k8sClient kubernetes.Interface,
addressGroupStore, appliedToGroupStore, networkPolicyStore, groupStore, egressGroupStore storage.Interface,
caCertController *certificate.CACertController,
statsAggregator *stats.Aggregator,
controllerQuerier querier.ControllerQuerier,
networkPolicyStatusController *controllernetworkpolicy.StatusController,
endpointQuerier controllernetworkpolicy.EndpointQuerier,
npController *controllernetworkpolicy.NetworkPolicyController,
egressController *egress.EgressController) *Config {
return &Config{
genericConfig: genericConfig,
extraConfig: ExtraConfig{
k8sClient: k8sClient,
addressGroupStore: addressGroupStore,
appliedToGroupStore: appliedToGroupStore,
networkPolicyStore: networkPolicyStore,
egressGroupStore: egressGroupStore,
caCertController: caCertController,
statsAggregator: statsAggregator,
controllerQuerier: controllerQuerier,
endpointQuerier: endpointQuerier,
networkPolicyController: npController,
networkPolicyStatusController: networkPolicyStatusController,
egressController: egressController,
},
}
}
func (c *Config) Complete(informers informers.SharedInformerFactory) completedConfig {
return completedConfig{c.genericConfig.Complete(informers), &c.extraConfig}
}
func installAPIGroup(s *APIServer, c completedConfig) error {
addressGroupStorage := addressgroup.NewREST(c.extraConfig.addressGroupStore)
appliedToGroupStorage := appliedtogroup.NewREST(c.extraConfig.appliedToGroupStore)
networkPolicyStorage := networkpolicy.NewREST(c.extraConfig.networkPolicyStore)
networkPolicyStatusStorage := networkpolicy.NewStatusREST(c.extraConfig.networkPolicyStatusController)
clusterGroupMembershipStorage := clustergroupmember.NewREST(c.extraConfig.networkPolicyController)
groupAssociationStorage := groupassociation.NewREST(c.extraConfig.networkPolicyController)
nodeStatsSummaryStorage := nodestatssummary.NewREST(c.extraConfig.statsAggregator)
egressGroupStorage := egressgroup.NewREST(c.extraConfig.egressGroupStore)
cpGroup := genericapiserver.NewDefaultAPIGroupInfo(controlplane.GroupName, Scheme, metav1.ParameterCodec, Codecs)
cpv1beta2Storage := map[string]rest.Storage{}
cpv1beta2Storage["addressgroups"] = addressGroupStorage
cpv1beta2Storage["appliedtogroups"] = appliedToGroupStorage
cpv1beta2Storage["networkpolicies"] = networkPolicyStorage
cpv1beta2Storage["networkpolicies/status"] = networkPolicyStatusStorage
cpv1beta2Storage["nodestatssummaries"] = nodeStatsSummaryStorage
cpv1beta2Storage["groupassociations"] = groupAssociationStorage
cpv1beta2Storage["clustergroupmembers"] = clusterGroupMembershipStorage
cpv1beta2Storage["egressgroups"] = egressGroupStorage
cpGroup.VersionedResourcesStorageMap["v1beta2"] = cpv1beta2Storage
systemGroup := genericapiserver.NewDefaultAPIGroupInfo(system.GroupName, Scheme, metav1.ParameterCodec, Codecs)
systemStorage := map[string]rest.Storage{}
systemStorage["controllerinfos"] = controllerinfo.NewREST(c.extraConfig.controllerQuerier)
bundleStorage := supportbundle.NewControllerStorage()
systemStorage["supportbundles"] = bundleStorage.SupportBundle
systemStorage["supportbundles/download"] = bundleStorage.Download
systemGroup.VersionedResourcesStorageMap["v1beta1"] = systemStorage
statsGroup := genericapiserver.NewDefaultAPIGroupInfo(apistats.GroupName, Scheme, metav1.ParameterCodec, Codecs)
statsStorage := map[string]rest.Storage{}
statsStorage["networkpolicystats"] = networkpolicystats.NewREST(c.extraConfig.statsAggregator)
statsStorage["antreaclusternetworkpolicystats"] = antreaclusternetworkpolicystats.NewREST(c.extraConfig.statsAggregator)
statsStorage["antreanetworkpolicystats"] = antreanetworkpolicystats.NewREST(c.extraConfig.statsAggregator)
statsGroup.VersionedResourcesStorageMap["v1alpha1"] = statsStorage
groups := []*genericapiserver.APIGroupInfo{&cpGroup, &systemGroup, &statsGroup}
// legacy groups
legacyCPGroup := genericapiserver.NewDefaultAPIGroupInfo(legacycontrolplane.GroupName, Scheme, metav1.ParameterCodec, Codecs)
legacyCPv1beta2Storage := map[string]rest.Storage{}
legacyCPv1beta2Storage["addressgroups"] = addressGroupStorage
legacyCPv1beta2Storage["appliedtogroups"] = appliedToGroupStorage
legacyCPv1beta2Storage["networkpolicies"] = networkPolicyStorage
legacyCPv1beta2Storage["networkpolicies/status"] = networkPolicyStatusStorage
legacyCPv1beta2Storage["nodestatssummaries"] = nodeStatsSummaryStorage
legacyCPv1beta2Storage["groupassociations"] = groupAssociationStorage
legacyCPv1beta2Storage["clustergroupmembers"] = clusterGroupMembershipStorage
legacyCPGroup.VersionedResourcesStorageMap["v1beta2"] = legacyCPv1beta2Storage
legacySystemGroup := genericapiserver.NewDefaultAPIGroupInfo(legacysystem.GroupName, Scheme, metav1.ParameterCodec, Codecs)
legacySystemGroup.VersionedResourcesStorageMap["v1beta1"] = systemStorage
legacyStatsGroup := genericapiserver.NewDefaultAPIGroupInfo(legacyapistats.GroupName, Scheme, metav1.ParameterCodec, Codecs)
legacyStatsGroup.VersionedResourcesStorageMap["v1alpha1"] = statsStorage
// legacy API groups
groups = append(groups, &legacyCPGroup, &legacySystemGroup, &legacyStatsGroup)
for _, apiGroupInfo := range groups {
if err := s.GenericAPIServer.InstallAPIGroup(apiGroupInfo); err != nil {
return err
}
}
return nil
}
func (c completedConfig) New() (*APIServer, error) {
genericServer, err := c.genericConfig.New("antrea-apiserver", genericapiserver.NewEmptyDelegate())
if err != nil {
return nil, err
}
s := &APIServer{
GenericAPIServer: genericServer,
caCertController: c.extraConfig.caCertController,
}
if err := installAPIGroup(s, c); err != nil {
return nil, err
}
installHandlers(c.extraConfig, s.GenericAPIServer)
return s, nil
}
// CleanupDeprecatedAPIServices deletes the registered APIService resources for
// the deprecated Antrea API groups.
func CleanupDeprecatedAPIServices(aggregatorClient clientset.Interface) error {
// The APIService of a deprecated API group should be added to the slice.
// After Antrea upgrades from an old version to a new version that
// deprecates a registered APIService, the APIService should be deleted,
// otherwise K8s will fail to delete an existing Namespace.
// Also check: https://github.com/antrea-io/antrea/issues/494
deprecatedAPIServices := []string{
"v1beta1.networking.antrea.tanzu.vmware.com",
"v1beta1.controlplane.antrea.tanzu.vmware.com",
}
for _, as := range deprecatedAPIServices {
err := aggregatorClient.ApiregistrationV1().APIServices().Delete(context.TODO(), as, metav1.DeleteOptions{})
if err == nil {
klog.Infof("Deleted the deprecated APIService %s", as)
} else if !apierrors.IsNotFound(err) {
return err
}
}
return nil
}
func installHandlers(c *ExtraConfig, s *genericapiserver.GenericAPIServer) {
s.Handler.NonGoRestfulMux.HandleFunc("/loglevel", loglevel.HandleFunc())
s.Handler.NonGoRestfulMux.HandleFunc("/featuregates", featuregates.HandleFunc(c.k8sClient))
s.Handler.NonGoRestfulMux.HandleFunc("/endpoint", endpoint.HandleFunc(c.endpointQuerier))
// Webhook to mutate Namespace labels and add its metadata.name as a label
s.Handler.NonGoRestfulMux.HandleFunc("/mutate/namespace", webhook.HandleMutationLabels())
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
// Get new NetworkPolicyMutator
m := controllernetworkpolicy.NewNetworkPolicyMutator(c.networkPolicyController)
// Install handlers for NetworkPolicy related mutation
s.Handler.NonGoRestfulMux.HandleFunc("/mutate/acnp", webhook.HandleMutationNetworkPolicy(m))
s.Handler.NonGoRestfulMux.HandleFunc("/mutate/anp", webhook.HandleMutationNetworkPolicy(m))
// Get new NetworkPolicyValidator
v := controllernetworkpolicy.NewNetworkPolicyValidator(c.networkPolicyController)
// Install handlers for NetworkPolicy related validation
s.Handler.NonGoRestfulMux.HandleFunc("/validate/tier", webhook.HandlerForValidateFunc(v.Validate))
s.Handler.NonGoRestfulMux.HandleFunc("/validate/acnp", webhook.HandlerForValidateFunc(v.Validate))
s.Handler.NonGoRestfulMux.HandleFunc("/validate/anp", webhook.HandlerForValidateFunc(v.Validate))
s.Handler.NonGoRestfulMux.HandleFunc("/validate/clustergroup", webhook.HandlerForValidateFunc(v.Validate))
// Install handlers for CRD conversion between versions
s.Handler.NonGoRestfulMux.HandleFunc("/convert/clustergroup", webhook.HandleCRDConversion(controllernetworkpolicy.ConvertClusterGroupCRD))
// Install a post start hook to initialize Tiers on start-up
s.AddPostStartHook("initialize-tiers", func(context genericapiserver.PostStartHookContext) error {
go c.networkPolicyController.InitializeTiers()
return nil
})
}
if features.DefaultFeatureGate.Enabled(features.Egress) {
s.Handler.NonGoRestfulMux.HandleFunc("/validate/egress", webhook.HandlerForValidateFunc(c.egressController.ValidateEgress))
s.Handler.NonGoRestfulMux.HandleFunc("/validate/externalippool", webhook.HandlerForValidateFunc(c.externalIPPoolController.ValidateExternalIPPool))
}
if features.DefaultFeatureGate.Enabled(features.AntreaIPAM) {
s.Handler.NonGoRestfulMux.HandleFunc("/validate/ippool", webhook.HandlerForValidateFunc(ipam.ValidateIPPool))
}
}
func DefaultCAConfig() *certificate.CAConfig {
return &certificate.CAConfig{
CAConfigMapName: certificate.AntreaCAConfigMapName,
APIServiceNames: []string{
"v1alpha1.stats.antrea.tanzu.vmware.com",
"v1beta2.controlplane.antrea.tanzu.vmware.com",
"v1beta1.system.antrea.tanzu.vmware.com",
"v1alpha1.stats.antrea.io",
"v1beta1.system.antrea.io",
"v1beta2.controlplane.antrea.io",
},
ValidatingWebhooks: []string{
"crdvalidator.antrea.tanzu.vmware.com",
"crdvalidator.antrea.io",
},
MutationWebhooks: []string{
"crdmutator.antrea.tanzu.vmware.com",
"crdmutator.antrea.io",
},
OptionalMutationWebhooks: []string{
"labelsmutator.antrea.io",
},
CRDsWithConversionWebhooks: []string{
"clustergroups.crd.antrea.io",
},
CertDir: "/var/run/antrea/antrea-controller-tls",
SelfSignedCertDir: "/var/run/antrea/antrea-controller-self-signed",
CertReadyTimeout: 2 * time.Minute,
MaxRotateDuration: time.Hour * (24 * 365),
ServiceName: certificate.AntreaServiceName,
PairName: "antrea-controller",
}
}
| 1 | 50,348 | The handler will be installed twice if you don't remove the below one. | antrea-io-antrea | go |
@@ -35,9 +35,14 @@ public class EthashConfigOptions {
return JsonUtil.getLong(ethashConfigRoot, "fixeddifficulty");
}
+ public OptionalLong getEpochLengthActivationBlock() {
+ return JsonUtil.getLong(ethashConfigRoot, "epochlengthactivation");
+ }
+
Map<String, Object> asMap() {
final ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
getFixedDifficulty().ifPresent(l -> builder.put("fixeddifficulty", l));
+ getEpochLengthActivationBlock().ifPresent(a -> builder.put("epochlengthactivation", a));
return builder.build();
}
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.config;
import java.util.Map;
import java.util.OptionalLong;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;
public class EthashConfigOptions {
public static final EthashConfigOptions DEFAULT =
new EthashConfigOptions(JsonUtil.createEmptyObjectNode());
private final ObjectNode ethashConfigRoot;
EthashConfigOptions(final ObjectNode ethashConfigRoot) {
this.ethashConfigRoot = ethashConfigRoot;
}
public OptionalLong getFixedDifficulty() {
return JsonUtil.getLong(ethashConfigRoot, "fixeddifficulty");
}
Map<String, Object> asMap() {
final ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
getFixedDifficulty().ifPresent(l -> builder.put("fixeddifficulty", l));
return builder.build();
}
}
| 1 | 23,541 | How about putting `epochLength` (or something like that) in place of `a` just to make it easier to read | hyperledger-besu | java |
@@ -79,6 +79,14 @@ func (graph *BuildGraph) Target(label BuildLabel) *BuildTarget {
func (graph *BuildGraph) TargetOrDie(label BuildLabel) *BuildTarget {
target := graph.Target(label)
if target == nil {
+ // TODO(jpoole): This is just a small usability message to help with the migration from v15 to v16. We should
+ // probably remove this after a grace period.
+ if label.Subrepo == "pleasings" {
+ if _, ok := graph.subrepos["pleasings"]; !ok {
+ log.Warning("You've tried to use the pleasings sub-repo. This is no longer included automatically.")
+ log.Warning("Use `plz init pleasings` to add the pleasings repo to your project.")
+ }
+ }
log.Fatalf("Target %s not found in build graph\n", label)
}
return target | 1 | // Representation of the build graph.
// The graph of build targets forms a DAG which we discover from the top
// down and then build bottom-up.
package core
import (
"reflect"
"sort"
"sync"
)
// A BuildGraph contains all the loaded targets and packages and maintains their
// relationships, especially reverse dependencies which are calculated here.
// It also arbitrates access to a lot of things via its builtin mutex which
// is probably our most overused lock :(
type BuildGraph struct {
// Map of all currently known targets by their label.
targets map[BuildLabel]*BuildTarget
// Map of all currently known packages.
packages map[packageKey]*Package
// Reverse dependencies that are pending on targets actually being added to the graph.
pendingRevDeps map[BuildLabel]map[BuildLabel]*BuildTarget
// Actual reverse dependencies
revDeps map[BuildLabel][]*BuildTarget
// Registered subrepos, as a map of their name to their root.
subrepos map[string]*Subrepo
// Used to arbitrate access to the graph. We parallelise most build operations
// and Go maps aren't natively threadsafe so this is needed.
mutex sync.RWMutex
}
// AddTarget adds a new target to the graph.
func (graph *BuildGraph) AddTarget(target *BuildTarget) *BuildTarget {
graph.mutex.Lock()
defer graph.mutex.Unlock()
if _, present := graph.targets[target.Label]; present {
panic("Attempted to re-add existing target to build graph: " + target.Label.String())
}
graph.targets[target.Label] = target
// Register any of its dependencies now
for _, dep := range target.DeclaredDependencies() {
graph.addDependencyForTarget(target, dep)
}
// Check these reverse deps which may have already been added against this target.
revdeps, present := graph.pendingRevDeps[target.Label]
if present {
for revdep, originalTarget := range revdeps {
if originalTarget != nil {
graph.linkDependencies(graph.targets[revdep], originalTarget)
} else {
graph.linkDependencies(graph.targets[revdep], target)
}
}
delete(graph.pendingRevDeps, target.Label) // Don't need any more
}
return target
}
// AddPackage adds a new package to the graph with given name.
func (graph *BuildGraph) AddPackage(pkg *Package) {
key := packageKey{Name: pkg.Name, Subrepo: pkg.SubrepoName}
graph.mutex.Lock()
defer graph.mutex.Unlock()
if _, present := graph.packages[key]; present {
panic("Attempt to readd existing package: " + key.String())
}
graph.packages[key] = pkg
}
// Target retrieves a target from the graph by label
func (graph *BuildGraph) Target(label BuildLabel) *BuildTarget {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return graph.targets[label]
}
// TargetOrDie retrieves a target from the graph by label. Dies if the target doesn't exist.
func (graph *BuildGraph) TargetOrDie(label BuildLabel) *BuildTarget {
target := graph.Target(label)
if target == nil {
log.Fatalf("Target %s not found in build graph\n", label)
}
return target
}
// PackageByLabel retrieves a package from the graph using the appropriate parts of the given label.
// The Name entry is ignored.
func (graph *BuildGraph) PackageByLabel(label BuildLabel) *Package {
return graph.Package(label.PackageName, label.Subrepo)
}
// Package retrieves a package from the graph by name & subrepo, or nil if it can't be found.
func (graph *BuildGraph) Package(name, subrepo string) *Package {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return graph.packages[packageKey{Name: name, Subrepo: subrepo}]
}
// PackageOrDie retrieves a package by label, and dies if it can't be found.
func (graph *BuildGraph) PackageOrDie(label BuildLabel) *Package {
pkg := graph.PackageByLabel(label)
if pkg == nil {
log.Fatalf("Package %s doesn't exist in graph", packageKey{Name: label.PackageName, Subrepo: label.Subrepo})
}
return pkg
}
// AddSubrepo adds a new subrepo to the graph. It dies if one is already registered by this name.
func (graph *BuildGraph) AddSubrepo(subrepo *Subrepo) {
graph.mutex.Lock()
defer graph.mutex.Unlock()
if _, present := graph.subrepos[subrepo.Name]; present {
log.Fatalf("Subrepo %s is already registered", subrepo.Name)
}
graph.subrepos[subrepo.Name] = subrepo
}
// MaybeAddSubrepo adds the given subrepo to the graph, or returns the existing one if one is already registered.
func (graph *BuildGraph) MaybeAddSubrepo(subrepo *Subrepo) *Subrepo {
graph.mutex.Lock()
defer graph.mutex.Unlock()
if s, present := graph.subrepos[subrepo.Name]; present {
if !reflect.DeepEqual(s, subrepo) {
log.Fatalf("Found multiple definitions for subrepo '%s' (%+v s %+v)",
s.Name, s, subrepo)
}
return s
}
graph.subrepos[subrepo.Name] = subrepo
return subrepo
}
// Subrepo returns the subrepo with this name. It returns nil if one isn't registered.
func (graph *BuildGraph) Subrepo(name string) *Subrepo {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return graph.subrepos[name]
}
// SubrepoOrDie returns the subrepo with this name, dying if it doesn't exist.
func (graph *BuildGraph) SubrepoOrDie(name string) *Subrepo {
subrepo := graph.Subrepo(name)
if subrepo == nil {
log.Fatalf("No registered subrepo by the name %s", name)
}
return subrepo
}
// Len returns the number of targets currently in the graph.
func (graph *BuildGraph) Len() int {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return len(graph.targets)
}
// AllTargets returns a consistently ordered slice of all the targets in the graph.
func (graph *BuildGraph) AllTargets() BuildTargets {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
targets := make(BuildTargets, 0, len(graph.targets))
for _, target := range graph.targets {
targets = append(targets, target)
}
sort.Sort(targets)
return targets
}
// PackageMap returns a copy of the graph's internal map of name to package.
func (graph *BuildGraph) PackageMap() map[string]*Package {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
packages := make(map[string]*Package, len(graph.packages))
for k, v := range graph.packages {
packages[k.String()] = v
}
return packages
}
// AddDependency adds a dependency between two build targets.
// The 'to' target doesn't necessarily have to exist in the graph yet (but 'from' must).
func (graph *BuildGraph) AddDependency(from BuildLabel, to BuildLabel) {
graph.mutex.Lock()
defer graph.mutex.Unlock()
graph.addDependencyForTarget(graph.targets[from], to)
}
// addDependencyForTarget adds a dependency between two build targets.
// The 'to' target doesn't necessarily have to exist in the graph yet.
// The caller must already hold the lock before calling this.
func (graph *BuildGraph) addDependencyForTarget(fromTarget *BuildTarget, to BuildLabel) {
// We might have done this already; do a quick check here first.
if fromTarget.hasResolvedDependency(to) {
return
}
toTarget, present := graph.targets[to]
// The dependency may not exist yet if we haven't parsed its package.
// In that case we stash it away for later.
if !present {
graph.addPendingRevDep(fromTarget.Label, to, nil)
} else {
graph.linkDependencies(fromTarget, toTarget)
}
}
// NewGraph constructs and returns a new BuildGraph.
// Users should not attempt to construct one themselves.
func NewGraph() *BuildGraph {
return &BuildGraph{
targets: map[BuildLabel]*BuildTarget{},
packages: map[packageKey]*Package{},
pendingRevDeps: map[BuildLabel]map[BuildLabel]*BuildTarget{},
revDeps: map[BuildLabel][]*BuildTarget{},
subrepos: map[string]*Subrepo{},
}
}
// ReverseDependencies returns the set of revdeps on the given target.
func (graph *BuildGraph) ReverseDependencies(target *BuildTarget) []*BuildTarget {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
if revdeps, present := graph.revDeps[target.Label]; present {
return revdeps[:]
}
return []*BuildTarget{}
}
// AllDepsBuilt returns true if all the dependencies of a target are built.
func (graph *BuildGraph) AllDepsBuilt(target *BuildTarget) bool {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return target.allDepsBuilt()
}
// AllDependenciesResolved returns true once all the dependencies of a target have been
// parsed and resolved to real targets.
func (graph *BuildGraph) AllDependenciesResolved(target *BuildTarget) bool {
graph.mutex.RLock()
defer graph.mutex.RUnlock()
return target.allDependenciesResolved()
}
// linkDependencies adds the dependency of fromTarget on toTarget and the corresponding
// reverse dependency in the other direction.
// This is complicated somewhat by the require/provide mechanism which is resolved at this
// point, but some of the dependencies may not yet exist.
func (graph *BuildGraph) linkDependencies(fromTarget, toTarget *BuildTarget) {
for _, label := range toTarget.ProvideFor(fromTarget) {
if target, present := graph.targets[label]; present {
fromTarget.resolveDependency(toTarget.Label, target)
graph.revDeps[label] = append(graph.revDeps[label], fromTarget)
} else {
graph.addPendingRevDep(fromTarget.Label, label, toTarget)
}
}
}
func (graph *BuildGraph) addPendingRevDep(from, to BuildLabel, orig *BuildTarget) {
if deps, present := graph.pendingRevDeps[to]; present {
deps[from] = orig
} else {
graph.pendingRevDeps[to] = map[BuildLabel]*BuildTarget{from: orig}
}
}
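// A worked illustration of the pending reverse-dependency mechanism above
// (the labels are hypothetical): if //foo:a declares a dependency on //bar:b
// before //bar has been parsed, addDependencyForTarget stores
// pendingRevDeps[//bar:b][//foo:a] = nil. When //bar:b is later added,
// AddTarget finds that entry, calls linkDependencies(a, b) and deletes the
// pending entry, so the forward and reverse edges are only created once both
// targets exist in the graph.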
// DependentTargets returns the labels that 'from' should actually depend on when it declared a dependency on 'to'.
// This is normally just 'to' but could be otherwise given require/provide shenanigans.
func (graph *BuildGraph) DependentTargets(from, to BuildLabel) []BuildLabel {
fromTarget := graph.Target(from)
if toTarget := graph.Target(to); fromTarget != nil && toTarget != nil {
graph.mutex.Lock()
defer graph.mutex.Unlock()
return toTarget.ProvideFor(fromTarget)
}
return []BuildLabel{to}
}
| 1 | 9,270 | Wouldn't this still fire afterwards if you created one called `pleasings`? or am I missing something? | thought-machine-please | go |
@@ -63,14 +63,14 @@ const kbpRecordPrefix = "_keybase_pages."
// _keybase_pages.meatball.gao.io TXT "kbp=/keybase/public/songgao/meatball/"
// _keybase_pages.song.gao.io TXT "kbp=/keybase/private/songgao,kb_bot/blah"
// _keybase_pages.blah.strib.io TXT "kbp=/keybase/private/strib#kb_bot/blahblahb" "lah/blah/"
-// _keybase_pages.kbp.jzila.com TXT "kbp=git-keybase://private/jzila,kb_bot/kbp.git"
+// _keybase_pages.kbp.jzila.com TXT "kbp=git@keybase:private/jzila,kb_bot/kbp.git"
func LoadRootFromDNS(log *zap.Logger, domain string) (root Root, err error) {
var rootPath string
defer func() {
zapFields := []zapcore.Field{
zap.String("domain", domain),
- zap.String("root_path", rootPath),
+ zap.String("kbp_record", rootPath),
}
if err == nil {
log.Info("LoadRootFromDNS", zapFields...) | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libpages
import (
"net"
"strings"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const (
keybasePagesPrefix = "kbp="
)
// ErrKeybasePagesRecordNotFound is returned when a domain requested doesn't
// have a kbp= record configured.
type ErrKeybasePagesRecordNotFound struct{}
// Error implements the error interface.
func (ErrKeybasePagesRecordNotFound) Error() string {
return "no TXT record is found for " + keybasePagesPrefix
}
// ErrKeybasePagesRecordTooMany is returned when a domain requested has more
// than one kbp= record configured.
type ErrKeybasePagesRecordTooMany struct{}
// Error implements the error interface.
func (ErrKeybasePagesRecordTooMany) Error() string {
return "more than 1 TXT record are found for " + keybasePagesPrefix
}
const kbpRecordPrefix = "_keybase_pages."
// LoadRootFromDNS loads the root path configured for domain, with following
// steps:
// 1. Construct a domain name by prefixing the `domain` parameter with
// "_keybase_pages.". So for example, "static.keybase.io" turns into
// "_keybase_pages.static.keybase.io".
// 2. Load TXT record(s) from the domain constructed in step 1, and look for
// one starting with "kbp=". If exactly one exists, parse it into a `Root`
// and return it.
//
// There must be exactly one "kbp=" TXT record configured for domain. If more
// than one exists, an ErrKeybasePagesRecordTooMany{} is returned. If none is
// found, an ErrKeybasePagesRecordNotFound{} is returned. In case the user has some
// configuration that requires other records that we can't foresee for now,
// other records (TXT or not) can co-exist with the "kbp=" record (as long as
// no CNAME record exists on the "_keybase_pages." prefixed domain of course).
//
// If the given domain is invalid, the domain name constructed in step 1 will
// be invalid too, which causes Go's DNS resolver to return a net.DNSError
// typed "no such host" error.
//
// Examples for "static.keybase.io", "meatball.gao.io", "song.gao.io",
// "blah.strib.io", and "kbp.jzila.com" respectively:
//
// _keybase_pages.static.keybase.io TXT "kbp=/keybase/team/keybase.bots/static.keybase.io"
// _keybase_pages.meatball.gao.io TXT "kbp=/keybase/public/songgao/meatball/"
// _keybase_pages.song.gao.io TXT "kbp=/keybase/private/songgao,kb_bot/blah"
// _keybase_pages.blah.strib.io TXT "kbp=/keybase/private/strib#kb_bot/blahblahb" "lah/blah/"
// _keybase_pages.kbp.jzila.com TXT "kbp=git-keybase://private/jzila,kb_bot/kbp.git"
func LoadRootFromDNS(log *zap.Logger, domain string) (root Root, err error) {
var rootPath string
defer func() {
zapFields := []zapcore.Field{
zap.String("domain", domain),
zap.String("root_path", rootPath),
}
if err == nil {
log.Info("LoadRootFromDNS", zapFields...)
} else {
log.Warn("LoadRootFromDNS", append(zapFields, zap.Error(err))...)
}
}()
txtRecords, err := net.LookupTXT(kbpRecordPrefix + domain)
if err != nil {
return Root{}, err
}
for _, r := range txtRecords {
r = strings.TrimSpace(r)
if strings.HasPrefix(r, keybasePagesPrefix) {
if len(rootPath) != 0 {
return Root{}, ErrKeybasePagesRecordTooMany{}
}
rootPath = r[len(keybasePagesPrefix):]
}
}
if len(rootPath) == 0 {
return Root{}, ErrKeybasePagesRecordNotFound{}
}
return ParseRoot(rootPath)
}
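// loadRootFromDNSExample is a minimal usage sketch; the function name and the
// domain below are illustrative rather than part of the package API. It shows
// how a caller might separate "no kbp= record" and "too many kbp= records"
// from ordinary DNS failures when resolving a site root.
func loadRootFromDNSExample(log *zap.Logger) {
	root, err := LoadRootFromDNS(log, "example.com")
	switch err.(type) {
	case nil:
		_ = root // the parsed Root is ready to be served
	case ErrKeybasePagesRecordNotFound:
		// no "kbp=" TXT record exists under "_keybase_pages.example.com"
	case ErrKeybasePagesRecordTooMany:
		// more than one "kbp=" TXT record is configured; exactly one is required
	default:
		// a DNS-level failure, e.g. a *net.DNSError for an invalid domain name
	}
}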
| 1 | 18,995 | I don't love this syntax; it doesn't match anything we're currently doing and it's not obvious. Why isn't this `/keybase/private/jzila,kb_bot/.kbfs_autogit/public/jzila/kbp.git`? | keybase-kbfs | go |
@@ -1135,11 +1135,17 @@ public final class Queue<T> extends AbstractsQueue<T, Queue<T>> implements Linea
return ofAll(toList().update(index, element));
}
- @SuppressWarnings("unchecked")
@Override
public <U> Queue<Tuple2<T, U>> zip(Iterable<? extends U> that) {
+ return zipWith(that, Tuple::of);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <U, R> Queue<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
- return ofAll(toList().zip((Iterable<U>) that));
+ Objects.requireNonNull(mapper, "mapper is null");
+ return ofAll(toList().zipWith((Iterable<U>) that, mapper));
}
@Override | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.control.Option;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
/**
* An immutable {@code Queue} stores elements allowing a first-in-first-out (FIFO) retrieval.
* <p>
* Queue API:
* <p>
* <ul>
* <li>{@link #dequeue()}</li>
* <li>{@link #dequeueOption()}</li>
* <li>{@link #enqueue(Object)}</li>
* <li>{@link #enqueue(Object[])}</li>
* <li>{@link #enqueueAll(Iterable)}</li>
* <li>{@link #peek()}</li>
* <li>{@link #peekOption()}</li>
* </ul>
* <p>
* A Queue internally consists of a front List containing the front elements of the Queue in the correct order and a
* rear List containing the rear elements of the Queue in reverse order.
* <p>
* When the front list is empty, front and rear are swapped and rear is reversed. This implies the following queue
* invariant: {@code front.isEmpty() => rear.isEmpty()}.
* <p>
* See Okasaki, Chris: <em>Purely Functional Data Structures</em> (p. 42 ff.). Cambridge, 2003.
*
* @param <T> Component type of the Queue
* @author Daniel Dietrich
* @since 2.0.0
*/
public final class Queue<T> extends AbstractsQueue<T, Queue<T>> implements LinearSeq<T>, Kind1<Queue<T>, T> {
private static final long serialVersionUID = 1L;
private static final Queue<?> EMPTY = new Queue<>(List.empty(), List.empty());
private final List<T> front;
private final List<T> rear;
/**
* Creates a Queue consisting of a front List and a rear List.
* <p>
* For a {@code Queue(front, rear)} the following invariant holds: {@code Queue is empty <=> front is empty}.
* In other words: If the Queue is not empty, the front List contains at least one element.
*
* @param front A List of front elements, in correct order.
* @param rear A List of rear elements, in reverse order.
*/
private Queue(List<T> front, List<T> rear) {
final boolean frontIsEmpty = front.isEmpty();
this.front = frontIsEmpty ? rear.reverse() : front;
this.rear = frontIsEmpty ? front : rear;
}
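    // A minimal sketch of the invariant documented above; the method name and
    // values are illustrative only. Dequeuing past the front list forces the
    // constructor to reverse the rear list into the new front.
    private static Queue<Integer> invariantSketch() {
        Queue<Integer> q = Queue.of(1, 2).enqueue(3); // front = (1, 2), rear = (3)
        q = q.dequeue()._2;                           // front = (2), rear = (3)
        q = q.dequeue()._2;                           // front was empty -> front = (3), rear = ()
        return q;
    }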
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.Queue}
* .
*
* @param <T> Component type of the Queue.
* @return A javaslang.collection.Queue Collector.
*/
public static <T> Collector<T, ArrayList<T>, Queue<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Queue<T>> finisher = Queue::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns the empty Queue.
*
* @param <T> Component type
* @return The empty Queue.
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> empty() {
return (Queue<T>) EMPTY;
}
/**
* Narrows a widened {@code Queue<? extends T>} to {@code Queue<T>}
* by performing a type safe-cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param queue An {@code Queue}.
* @param <T> Component type of the {@code Queue}.
* @return the given {@code queue} instance as narrowed type {@code Queue<T>}.
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> narrow(Queue<? extends T> queue) {
return (Queue<T>) queue;
}
/**
* Returns a singleton {@code Queue}, i.e. a {@code Queue} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new Queue instance containing the given element
*/
public static <T> Queue<T> of(T element) {
return ofAll(List.of(element));
}
/**
* Creates a Queue of the given elements.
*
* @param <T> Component type of the Queue.
* @param elements Zero or more elements.
* @return A queue containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SuppressWarnings("varargs")
@SafeVarargs
public static <T> Queue<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.of(elements));
}
/**
* Creates a Queue of the given elements.
*
* @param <T> Component type of the Queue.
* @param elements An Iterable of elements.
* @return A queue containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof Queue) {
return (Queue<T>) elements;
} else if (!elements.iterator().hasNext()) {
return empty();
} else if (elements instanceof List) {
return new Queue<>((List<T>) elements, List.empty());
} else {
return new Queue<>(List.ofAll(elements), List.empty());
}
}
/**
* Creates a Queue that contains the elements of the given {@link java.util.stream.Stream}.
*
* @param javaStream A {@link java.util.stream.Stream}
* @param <T> Component type of the Stream.
* @return A Queue containing the given elements in the same order.
*/
public static <T> Queue<T> ofAll(java.util.stream.Stream<? extends T> javaStream) {
Objects.requireNonNull(javaStream, "javaStream is null");
return new Queue<>(List.ofAll(javaStream), List.empty());
}
/**
* Creates a Queue based on the elements of a boolean array.
*
* @param array a boolean array
* @return A new Queue of Boolean values
* @throws NullPointerException if array is null
*/
public static Queue<Boolean> ofAll(boolean[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a byte array.
*
* @param array a byte array
* @return A new Queue of Byte values
* @throws NullPointerException if array is null
*/
public static Queue<Byte> ofAll(byte[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a char array.
*
* @param array a char array
* @return A new Queue of Character values
* @throws NullPointerException if array is null
*/
public static Queue<Character> ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a double array.
*
* @param array a double array
* @return A new Queue of Double values
* @throws NullPointerException if array is null
*/
public static Queue<Double> ofAll(double[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a float array.
*
* @param array a float array
* @return A new Queue of Float values
* @throws NullPointerException if array is null
*/
public static Queue<Float> ofAll(float[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of an int array.
*
* @param array an int array
* @return A new Queue of Integer values
* @throws NullPointerException if array is null
*/
public static Queue<Integer> ofAll(int[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a long array.
*
* @param array a long array
* @return A new Queue of Long values
* @throws NullPointerException if array is null
*/
public static Queue<Long> ofAll(long[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Creates a Queue based on the elements of a short array.
*
* @param array a short array
* @return A new Queue of Short values
* @throws NullPointerException if array is null
*/
public static Queue<Short> ofAll(short[] array) {
Objects.requireNonNull(array, "array is null");
return ofAll(List.ofAll(array));
}
/**
* Returns a Queue containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the Queue
* @param n The number of elements in the Queue
* @param f The Function computing element values
* @return A Queue consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static <T> Queue<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Collections.tabulate(n, f, empty(), Queue::of);
}
/**
* Returns a Queue containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Queue
* @param n The number of elements in the Queue
* @param s The Supplier computing element values
     * @return A Queue of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static <T> Queue<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return Collections.fill(n, s, empty(), Queue::of);
}
public static Queue<Character> range(char from, char toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
public static Queue<Character> rangeBy(char from, char toExclusive, int step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
@GwtIncompatible
public static Queue<Double> rangeBy(double from, double toExclusive, double step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.range(0, 0) // = Queue()
* Queue.range(2, 0) // = Queue()
* Queue.range(-2, 2) // = Queue(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or {@code Nil} if {@code from >= toExclusive}
*/
public static Queue<Integer> range(int from, int toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeBy(1, 3, 1) // = Queue(1, 2)
* Queue.rangeBy(1, 4, 2) // = Queue(1, 3)
* Queue.rangeBy(4, 1, -2) // = Queue(4, 2)
* Queue.rangeBy(4, 1, 2) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
     * @return a range of int values as specified or {@code Nil} if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Integer> rangeBy(int from, int toExclusive, int step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.range(0L, 0L) // = Queue()
* Queue.range(2L, 0L) // = Queue()
* Queue.range(-2L, 2L) // = Queue(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or {@code Nil} if {@code from >= toExclusive}
*/
public static Queue<Long> range(long from, long toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeBy(1L, 3L, 1L) // = Queue(1L, 2L)
* Queue.rangeBy(1L, 4L, 2L) // = Queue(1L, 3L)
* Queue.rangeBy(4L, 1L, -2L) // = Queue(4L, 2L)
* Queue.rangeBy(4L, 1L, 2L) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or {@code Nil} if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Long> rangeBy(long from, long toExclusive, long step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
public static Queue<Character> rangeClosed(char from, char toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
public static Queue<Character> rangeClosedBy(char from, char toInclusive, int step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@GwtIncompatible
public static Queue<Double> rangeClosedBy(double from, double toInclusive, double step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosed(0, 0) // = Queue(0)
* Queue.rangeClosed(2, 0) // = Queue()
* Queue.rangeClosed(-2, 2) // = Queue(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or {@code Nil} if {@code from > toInclusive}
*/
public static Queue<Integer> rangeClosed(int from, int toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosedBy(1, 3, 1) // = Queue(1, 2, 3)
* Queue.rangeClosedBy(1, 4, 2) // = Queue(1, 3)
* Queue.rangeClosedBy(4, 1, -2) // = Queue(4, 2)
* Queue.rangeClosedBy(4, 1, 2) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosed(0L, 0L) // = Queue(0L)
* Queue.rangeClosed(2L, 0L) // = Queue()
* Queue.rangeClosed(-2L, 2L) // = Queue(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or {@code Nil} if {@code from > toInclusive}
*/
public static Queue<Long> rangeClosed(long from, long toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosedBy(1L, 3L, 1L) // = Queue(1L, 2L, 3L)
* Queue.rangeClosedBy(1L, 4L, 2L) // = Queue(1L, 3L)
* Queue.rangeClosedBy(4L, 1L, -2L) // = Queue(4L, 2L)
* Queue.rangeClosedBy(4L, 1L, 2L) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
     * @return a range of long values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Long> rangeClosedBy(long from, long toInclusive, long step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the element for the next call and the value to add to the
* resulting Queue.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfoldRight(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x, x-1)));
     * // Queue(10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
* </code>
* </pre>
*
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws IllegalArgumentException if {@code f} is null
*/
public static <T, U> Queue<U> unfoldRight(T seed, Function<? super T, Option<Tuple2<? extends U, ? extends T>>> f) {
return Iterator.unfoldRight(seed, f).toQueue();
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Queue and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfoldLeft(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
     * // Queue(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws IllegalArgumentException if {@code f} is null
*/
public static <T, U> Queue<U> unfoldLeft(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends U>>> f) {
return Iterator.unfoldLeft(seed, f).toQueue();
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Queue and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfold(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
     * // Queue(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws IllegalArgumentException if {@code f} is null
*/
public static <T> Queue<T> unfold(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends T>>> f) {
return Iterator.unfold(seed, f).toQueue();
}
/**
* Enqueues a new element.
*
* @param element The new element
* @return a new {@code Queue} instance, containing the new element
*/
@Override
public Queue<T> enqueue(T element) {
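        // O(1): the new element is prepended to the reversed rear list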
return new Queue<>(front, rear.prepend(element));
}
// -- Adjusted return types of Seq methods
@Override
public Queue<T> append(T element) {
return enqueue(element);
}
@Override
public Queue<T> appendAll(Iterable<? extends T> elements) {
return enqueueAll(elements);
}
@Override
public Queue<Queue<T>> combinations() {
return ofAll(toList().combinations().map(Queue::ofAll));
}
@Override
public Queue<Queue<T>> combinations(int k) {
return ofAll(toList().combinations(k).map(Queue::ofAll));
}
@Override
public Iterator<Queue<T>> crossProduct(int power) {
return Collections.crossProduct(empty(), this, power);
}
@Override
public Queue<T> distinct() {
return ofAll(toList().distinct());
}
@Override
public Queue<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return ofAll(toList().distinctBy(comparator));
}
@Override
public <U> Queue<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
return ofAll(toList().distinctBy(keyExtractor));
}
@Override
public Queue<T> drop(long n) {
if (n <= 0) {
return this;
}
if (n >= length()) {
return empty();
}
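        // 0 < n < length(): drop from front first; the remainder comes off the right end of rear, which stores elements newest-first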
return new Queue<>(front.drop(n), rear.dropRight(n - front.length()));
}
@Override
public Queue<T> dropRight(long n) {
if (n <= 0) {
return this;
}
if (n >= length()) {
return empty();
}
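        // mirror of drop(): the newest elements are removed from the head of rear first, then from the right end of front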
return new Queue<>(front.dropRight(n - rear.length()), rear.drop(n));
}
@Override
public Queue<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
public Queue<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final List<T> dropped = toList().dropWhile(predicate);
return ofAll(dropped.length() == length() ? this : dropped);
}
@Override
public Queue<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final List<T> filtered = toList().filter(predicate);
if (filtered.isEmpty()) {
return empty();
} else if (filtered.length() == length()) {
return this;
} else {
return ofAll(filtered);
}
}
@Override
public <U> Queue<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
return new Queue<>(front.flatMap(mapper), rear.flatMap(mapper));
}
}
@Override
public T get(int index) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on empty Queue");
}
if (index < 0) {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
final int length = front.length();
if (index < length) {
return front.get(index);
} else {
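            // the element lives in rear, which stores elements newest-first, so mirror the index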
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex < rearLength) {
final int reverseRearIndex = rearLength - rearIndex - 1;
return rear.get(reverseRearIndex);
} else {
throw new IndexOutOfBoundsException("get(" + index + ") on Queue of length " + length());
}
}
}
@Override
public <C> Map<C, Queue<T>> groupBy(Function<? super T, ? extends C> classifier) {
return Collections.groupBy(this, classifier, Queue::ofAll);
}
@Override
public Iterator<Queue<T>> grouped(long size) {
return sliding(size, size);
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public T head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty " + stringPrefix());
} else {
return front.head();
}
}
@Override
public int indexOf(T element, int from) {
final int frontIndex = front.indexOf(element, from);
if (frontIndex != -1) {
return frontIndex;
} else {
            // we need to reverse the rear list because we search for the first occurrence
final int rearIndex = rear.reverse().indexOf(element, from - front.length());
return (rearIndex == -1) ? -1 : rearIndex + front.length();
}
}
@Override
public Queue<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty " + stringPrefix());
} else if (rear.isEmpty()) {
return new Queue<>(front.init(), rear);
} else {
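            // the last (most recently enqueued) element is the head of rear, so drop it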
return new Queue<>(front, rear.tail());
}
}
@Override
public Queue<T> insert(int index, T element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
}
final int length = front.length();
if (index <= length) {
return new Queue<>(front.insert(index, element), rear);
} else {
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex <= rearLength) {
final int reverseRearIndex = rearLength - rearIndex;
return new Queue<>(front, rear.insert(reverseRearIndex, element));
} else {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on Queue of length " + length());
}
}
}
@Override
public Queue<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", e)");
}
final int length = front.length();
if (index <= length) {
return new Queue<>(front.insertAll(index, elements), rear);
} else {
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex <= rearLength) {
final int reverseRearIndex = rearLength - rearIndex;
return new Queue<>(front, rear.insertAll(reverseRearIndex, List.ofAll(elements).reverse()));
} else {
throw new IndexOutOfBoundsException("insertAll(" + index + ", e) on Queue of length " + length());
}
}
}
@Override
public Queue<T> intersperse(T element) {
if (isEmpty()) {
return this;
} else if (rear.isEmpty()) {
return new Queue<>(front.intersperse(element), rear);
} else {
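            // rear is reversed, so one extra separator is appended for the pair that spans the front/rear boundary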
return new Queue<>(front.intersperse(element), rear.intersperse(element).append(element));
}
}
@Override
public boolean isEmpty() {
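        // relies on the invariant that front is empty only if the whole queue is empty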
return front.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
@Override
public int lastIndexOf(T element, int end) {
return toList().lastIndexOf(element, end);
}
@Override
public int length() {
return front.length() + rear.length();
}
@Override
public <U> Queue<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return new Queue<>(front.map(mapper), rear.map(mapper));
}
@Override
public Queue<T> padTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return ofAll(toList().padTo(length, element));
}
}
@Override
public Queue<T> leftPadTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return ofAll(toList().leftPadTo(length, element));
}
}
@Override
public Queue<T> patch(int from, Iterable<? extends T> that, int replaced) {
from = from < 0 ? 0 : from;
replaced = replaced < 0 ? 0 : replaced;
Queue<T> result = take(from).appendAll(that);
from += replaced;
result = result.appendAll(drop(from));
return result;
}
@Override
public Tuple2<Queue<T>, Queue<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return toList().partition(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Queue<Queue<T>> permutations() {
return ofAll(toList().permutations().map(List::toQueue));
}
@Override
public Queue<T> prepend(T element) {
return new Queue<>(front.prepend(element), rear);
}
@Override
public Queue<T> prependAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return new Queue<>(front.prependAll(elements), rear);
}
@Override
public Queue<T> remove(T element) {
final List<T> removed = toList().remove(element);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeFirst(Predicate<T> predicate) {
final List<T> removed = toList().removeFirst(predicate);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeLast(Predicate<T> predicate) {
final List<T> removed = toList().removeLast(predicate);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeAt(int index) {
return ofAll(toList().removeAt(index));
}
@Override
public Queue<T> removeAll(T element) {
return Collections.removeAll(this, element);
}
@Override
public Queue<T> replace(T currentElement, T newElement) {
final List<T> newFront = front.replace(currentElement, newElement);
final List<T> newRear = rear.replace(currentElement, newElement);
return newFront.size() + newRear.size() == 0 ? empty()
: newFront == front && newRear == rear ? this
: new Queue<>(newFront, newRear);
}
@Override
public Queue<T> replaceAll(T currentElement, T newElement) {
final List<T> newFront = front.replaceAll(currentElement, newElement);
final List<T> newRear = rear.replaceAll(currentElement, newElement);
return newFront.size() + newRear.size() == 0 ? empty()
: newFront == front && newRear == rear ? this
: new Queue<>(newFront, newRear);
}
@Override
public Queue<T> reverse() {
return isEmpty() ? this : ofAll(toList().reverse());
}
@Override
public Queue<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
public <U> Queue<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
// prepends to the rear-list in O(1)
return Collections.scanLeft(this, zero, operation, empty(), Queue::append, Function.identity());
}
@Override
public <U> Queue<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
        // adds elements in reverse order in O(1) and creates a Queue instance in O(1)
final List<U> list = Collections.scanRight(this, zero, operation, List.empty(), List::prepend, Function.identity());
return ofAll(list);
}
@Override
public Queue<T> slice(long beginIndex, long endIndex) {
return ofAll(toList().slice(beginIndex, endIndex));
}
@Override
public Iterator<Queue<T>> sliding(long size) {
return sliding(size, 1);
}
@Override
public Iterator<Queue<T>> sliding(long size, long step) {
return iterator().sliding(size, step).map(Queue::ofAll);
}
@Override
public Queue<T> sorted() {
return ofAll(toList().sorted());
}
@Override
public Queue<T> sorted(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return ofAll(toList().sorted(comparator));
}
@Override
public <U extends Comparable<? super U>> Queue<T> sortBy(Function<? super T, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
public <U> Queue<T> sortBy(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
final Function<? super T, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
@Override
public Tuple2<Queue<T>, Queue<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return toList().span(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAt(long n) {
return toList().splitAt(n).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAt(Predicate<? super T> predicate) {
return toList().splitAt(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAtInclusive(Predicate<? super T> predicate) {
return toList().splitAtInclusive(predicate).map(List::toQueue, List::toQueue);
}
@Override
public boolean startsWith(Iterable<? extends T> that, int offset) {
return toList().startsWith(that, offset);
}
@Override
public Spliterator<T> spliterator() {
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
public Queue<T> subSequence(int beginIndex) {
return ofAll(toList().subSequence(beginIndex));
}
@Override
public Queue<T> subSequence(int beginIndex, int endIndex) {
return ofAll(toList().subSequence(beginIndex, endIndex));
}
@Override
public Queue<T> tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty " + stringPrefix());
} else {
return new Queue<>(front.tail(), rear);
}
}
@Override
public Queue<T> take(long n) {
if (n <= 0) {
return empty();
}
if (n >= length()) {
return this;
}
final int frontLength = front.length();
if (n < frontLength) {
return new Queue<>(front.take(n), List.empty());
} else if (n == frontLength) {
return new Queue<>(front, List.empty());
} else {
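            // keep all of front plus the oldest (rightmost) elements of the reversed rear list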
return new Queue<>(front, rear.takeRight(n - frontLength));
}
}
@Override
public Queue<T> takeRight(long n) {
if (n <= 0) {
return empty();
}
if (n >= length()) {
return this;
}
final int rearLength = rear.length();
if (n < rearLength) {
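            // the newest n elements sit at the head of rear; reverse them to form a valid front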
return new Queue<>(rear.take(n).reverse(), List.empty());
} else if (n == rearLength) {
return new Queue<>(rear.reverse(), List.empty());
} else {
return new Queue<>(front.takeRight(n - rearLength), rear);
}
}
@Override
public Queue<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final List<T> taken = toList().takeUntil(predicate);
return taken.length() == length() ? this : ofAll(taken);
}
/**
* Transforms this {@code Queue}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super Queue<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
public <U> Queue<U> unit(Iterable<? extends U> iterable) {
return ofAll(iterable);
}
@Override
public <T1, T2> Tuple2<Queue<T1>, Queue<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return toList().unzip(unzipper).map(List::toQueue, List::toQueue);
}
@Override
public <T1, T2, T3> Tuple3<Queue<T1>, Queue<T2>, Queue<T3>> unzip3(Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return toList().unzip3(unzipper).map(List::toQueue, List::toQueue, List::toQueue);
}
@Override
public Queue<T> update(int index, T element) {
return ofAll(toList().update(index, element));
}
@SuppressWarnings("unchecked")
@Override
public <U> Queue<Tuple2<T, U>> zip(Iterable<? extends U> that) {
Objects.requireNonNull(that, "that is null");
return ofAll(toList().zip((Iterable<U>) that));
}
@Override
public <U> Queue<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
return ofAll(toList().zipAll(that, thisElem, thatElem));
}
@Override
public Queue<Tuple2<T, Long>> zipWithIndex() {
return ofAll(toList().zipWithIndex());
}
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public String stringPrefix() {
return "Queue";
}
@Override
public boolean equals(Object o) {
return o == this || o instanceof Queue && Collections.areEqual(this, (Iterable) o);
}
}
| 1 | 8,885 | Does a type-hint work instead of casting? `return ofAll(toList().<U> zipWith(that, mapper));` (Probably not, just a question.) | vavr-io-vavr | java |
@@ -90,11 +90,11 @@ public class TestTableMetadata {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
- new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
+ new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, 7, ImmutableList.of(
- new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
+ new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> snapshotLog = ImmutableList.<HistoryEntry>builder()
.add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId())) | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import java.io.IOException;
import java.io.StringWriter;
import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.UUID;
import org.apache.iceberg.TableMetadata.MetadataLogEntry;
import org.apache.iceberg.TableMetadata.SnapshotLogEntry;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.JsonUtil;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import static org.apache.iceberg.Files.localInput;
import static org.apache.iceberg.TableMetadataParser.CURRENT_SNAPSHOT_ID;
import static org.apache.iceberg.TableMetadataParser.FORMAT_VERSION;
import static org.apache.iceberg.TableMetadataParser.LAST_COLUMN_ID;
import static org.apache.iceberg.TableMetadataParser.LAST_UPDATED_MILLIS;
import static org.apache.iceberg.TableMetadataParser.LOCATION;
import static org.apache.iceberg.TableMetadataParser.PARTITION_SPEC;
import static org.apache.iceberg.TableMetadataParser.PROPERTIES;
import static org.apache.iceberg.TableMetadataParser.SCHEMA;
import static org.apache.iceberg.TableMetadataParser.SNAPSHOTS;
import static org.apache.iceberg.TestHelpers.assertSameSchemaList;
public class TestTableMetadata {
private static final String TEST_LOCATION = "s3://bucket/test/location";
private static final Schema TEST_SCHEMA = new Schema(7,
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get(), "comment"),
Types.NestedField.required(3, "z", Types.LongType.get())
);
private static final long SEQ_NO = 34;
private static final int LAST_ASSIGNED_COLUMN_ID = 3;
private static final PartitionSpec SPEC_5 = PartitionSpec.builderFor(TEST_SCHEMA).withSpecId(5).build();
private static final SortOrder SORT_ORDER_3 = SortOrder.builderFor(TEST_SCHEMA)
.withOrderId(3)
.asc("y", NullOrder.NULLS_FIRST)
.desc(Expressions.bucket("z", 4), NullOrder.NULLS_LAST)
.build();
@Rule
public TemporaryFolder temp = new TemporaryFolder();
public TableOperations ops = new LocalTableOperations(temp);
@Test
public void testJsonConversion() throws Exception {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, 7, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> snapshotLog = ImmutableList.<HistoryEntry>builder()
.add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()))
.add(new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()))
.build();
Schema schema = new Schema(6,
Types.NestedField.required(10, "x", Types.StringType.get()));
TableMetadata expected = new TableMetadata(null, 2, UUID.randomUUID().toString(), TEST_LOCATION,
SEQ_NO, System.currentTimeMillis(), 3,
7, ImmutableList.of(TEST_SCHEMA, schema),
5, ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(),
3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), snapshotLog, ImmutableList.of());
String asJson = TableMetadataParser.toJson(expected);
TableMetadata metadata = TableMetadataParser.fromJson(ops.io(), null,
JsonUtil.mapper().readValue(asJson, JsonNode.class));
Assert.assertEquals("Format version should match",
expected.formatVersion(), metadata.formatVersion());
Assert.assertEquals("Table UUID should match",
expected.uuid(), metadata.uuid());
Assert.assertEquals("Table location should match",
expected.location(), metadata.location());
Assert.assertEquals("Last sequence number should match",
expected.lastSequenceNumber(), metadata.lastSequenceNumber());
Assert.assertEquals("Last column ID should match",
expected.lastColumnId(), metadata.lastColumnId());
Assert.assertEquals("Current schema id should match",
expected.currentSchemaId(), metadata.currentSchemaId());
assertSameSchemaList(expected.schemas(), metadata.schemas());
Assert.assertEquals("Partition spec should match",
expected.spec().toString(), metadata.spec().toString());
Assert.assertEquals("Default spec ID should match",
expected.defaultSpecId(), metadata.defaultSpecId());
Assert.assertEquals("PartitionSpec map should match",
expected.specs(), metadata.specs());
Assert.assertEquals("lastAssignedFieldId across all PartitionSpecs should match",
expected.spec().lastAssignedFieldId(), metadata.lastAssignedPartitionId());
Assert.assertEquals("Default sort ID should match",
expected.defaultSortOrderId(), metadata.defaultSortOrderId());
Assert.assertEquals("Sort order should match",
expected.sortOrder(), metadata.sortOrder());
Assert.assertEquals("Sort order map should match",
expected.sortOrders(), metadata.sortOrders());
Assert.assertEquals("Properties should match",
expected.properties(), metadata.properties());
Assert.assertEquals("Snapshot logs should match",
expected.snapshotLog(), metadata.snapshotLog());
Assert.assertEquals("Current snapshot ID should match",
currentSnapshotId, metadata.currentSnapshot().snapshotId());
Assert.assertEquals("Parent snapshot ID should match",
(Long) previousSnapshotId, metadata.currentSnapshot().parentId());
Assert.assertEquals("Current snapshot files should match",
currentSnapshot.allManifests(), metadata.currentSnapshot().allManifests());
Assert.assertEquals("Schema ID for current snapshot should match",
(Integer) 7, metadata.currentSnapshot().schemaId());
Assert.assertEquals("Previous snapshot ID should match",
previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
Assert.assertEquals("Previous snapshot files should match",
previousSnapshot.allManifests(),
metadata.snapshot(previousSnapshotId).allManifests());
Assert.assertNull("Previous snapshot's schema ID should be null",
metadata.snapshot(previousSnapshotId).schemaId());
}
@Test
public void testBackwardCompat() throws Exception {
PartitionSpec spec = PartitionSpec.builderFor(TEST_SCHEMA).identity("x").withSpecId(6).build();
SortOrder sortOrder = SortOrder.unsorted();
Schema schema = new Schema(TableMetadata.INITIAL_SCHEMA_ID, TEST_SCHEMA.columns());
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));
TableMetadata expected = new TableMetadata(null, 1, null, TEST_LOCATION,
0, System.currentTimeMillis(), 3, TableMetadata.INITIAL_SCHEMA_ID,
ImmutableList.of(schema), 6, ImmutableList.of(spec), spec.lastAssignedFieldId(),
TableMetadata.INITIAL_SORT_ORDER_ID, ImmutableList.of(sortOrder), ImmutableMap.of("property", "value"),
currentSnapshotId, Arrays.asList(previousSnapshot, currentSnapshot), ImmutableList.of(), ImmutableList.of());
String asJson = toJsonWithoutSpecAndSchemaList(expected);
TableMetadata metadata = TableMetadataParser
.fromJson(ops.io(), null, JsonUtil.mapper().readValue(asJson, JsonNode.class));
Assert.assertEquals("Format version should match",
expected.formatVersion(), metadata.formatVersion());
Assert.assertNull("Table UUID should not be assigned", metadata.uuid());
Assert.assertEquals("Table location should match",
expected.location(), metadata.location());
Assert.assertEquals("Last sequence number should default to 0",
expected.lastSequenceNumber(), metadata.lastSequenceNumber());
Assert.assertEquals("Last column ID should match",
expected.lastColumnId(), metadata.lastColumnId());
Assert.assertEquals("Current schema ID should be default to TableMetadata.INITIAL_SCHEMA_ID",
TableMetadata.INITIAL_SCHEMA_ID, metadata.currentSchemaId());
Assert.assertEquals("Schemas size should match",
1, metadata.schemas().size());
Assert.assertEquals("Schemas should contain the schema",
metadata.schemas().get(0).asStruct(), schema.asStruct());
Assert.assertEquals("Partition spec should be the default",
expected.spec().toString(), metadata.spec().toString());
Assert.assertEquals("Default spec ID should default to TableMetadata.INITIAL_SPEC_ID",
TableMetadata.INITIAL_SPEC_ID, metadata.defaultSpecId());
Assert.assertEquals("PartitionSpec should contain the spec",
1, metadata.specs().size());
Assert.assertTrue("PartitionSpec should contain the spec",
metadata.specs().get(0).compatibleWith(spec));
Assert.assertEquals("PartitionSpec should have ID TableMetadata.INITIAL_SPEC_ID",
TableMetadata.INITIAL_SPEC_ID, metadata.specs().get(0).specId());
Assert.assertEquals("lastAssignedFieldId across all PartitionSpecs should match",
expected.spec().lastAssignedFieldId(), metadata.lastAssignedPartitionId());
Assert.assertEquals("Properties should match",
expected.properties(), metadata.properties());
Assert.assertEquals("Snapshot logs should match",
expected.snapshotLog(), metadata.snapshotLog());
Assert.assertEquals("Current snapshot ID should match",
currentSnapshotId, metadata.currentSnapshot().snapshotId());
Assert.assertEquals("Parent snapshot ID should match",
(Long) previousSnapshotId, metadata.currentSnapshot().parentId());
Assert.assertEquals("Current snapshot files should match",
currentSnapshot.allManifests(), metadata.currentSnapshot().allManifests());
Assert.assertNull("Current snapshot's schema ID should be null",
metadata.currentSnapshot().schemaId());
Assert.assertEquals("Previous snapshot ID should match",
previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
Assert.assertEquals("Previous snapshot files should match",
previousSnapshot.allManifests(),
metadata.snapshot(previousSnapshotId).allManifests());
Assert.assertEquals("Snapshot logs should match",
expected.previousFiles(), metadata.previousFiles());
Assert.assertNull("Previous snapshot's schema ID should be null",
metadata.snapshot(previousSnapshotId).schemaId());
}
private static String toJsonWithoutSpecAndSchemaList(TableMetadata metadata) {
StringWriter writer = new StringWriter();
try {
JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
generator.writeStartObject(); // start table metadata object
generator.writeNumberField(FORMAT_VERSION, 1);
generator.writeStringField(LOCATION, metadata.location());
generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());
// mimic an old writer by writing only schema and not the current ID or schema list
generator.writeFieldName(SCHEMA);
SchemaParser.toJson(metadata.schema().asStruct(), generator);
// mimic an old writer by writing only partition-spec and not the default ID or spec list
generator.writeFieldName(PARTITION_SPEC);
PartitionSpecParser.toJsonFields(metadata.spec(), generator);
generator.writeObjectFieldStart(PROPERTIES);
for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
generator.writeStringField(keyValue.getKey(), keyValue.getValue());
}
generator.writeEndObject();
generator.writeNumberField(CURRENT_SNAPSHOT_ID,
metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);
generator.writeArrayFieldStart(SNAPSHOTS);
for (Snapshot snapshot : metadata.snapshots()) {
SnapshotParser.toJson(snapshot, generator);
}
generator.writeEndArray();
// skip the snapshot log
generator.writeEndObject(); // end table metadata object
generator.flush();
} catch (IOException e) {
throw new UncheckedIOException(String.format("Failed to write json for: %s", metadata), e);
}
return writer.toString();
}
@Test
public void testJsonWithPreviousMetadataLog() throws Exception {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
long currentTimestamp = System.currentTimeMillis();
List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp,
"/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
TableMetadata base = new TableMetadata(null, 1, UUID.randomUUID().toString(), TEST_LOCATION,
0, System.currentTimeMillis(), 3,
7, ImmutableList.of(TEST_SCHEMA), 5, ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(),
3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
ImmutableList.copyOf(previousMetadataLog));
String asJson = TableMetadataParser.toJson(base);
TableMetadata metadataFromJson = TableMetadataParser.fromJson(ops.io(), null,
JsonUtil.mapper().readValue(asJson, JsonNode.class));
Assert.assertEquals("Metadata logs should match", previousMetadataLog, metadataFromJson.previousFiles());
}
@Test
public void testAddPreviousMetadataRemoveNone() {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
long currentTimestamp = System.currentTimeMillis();
List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
"/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
"/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 80,
"/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json");
TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1, UUID.randomUUID().toString(),
TEST_LOCATION, 0, currentTimestamp - 80, 3,
7, ImmutableList.of(TEST_SCHEMA), 5, ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(),
3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
ImmutableList.copyOf(previousMetadataLog));
previousMetadataLog.add(latestPreviousMetadata);
TableMetadata metadata = base.replaceProperties(
ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "5"));
Set<MetadataLogEntry> removedPreviousMetadata = Sets.newHashSet(base.previousFiles());
removedPreviousMetadata.removeAll(metadata.previousFiles());
Assert.assertEquals("Metadata logs should match", previousMetadataLog, metadata.previousFiles());
Assert.assertEquals("Removed Metadata logs should be empty", 0, removedPreviousMetadata.size());
}
@Test
public void testAddPreviousMetadataRemoveOne() {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
long currentTimestamp = System.currentTimeMillis();
List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
"/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
"/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
"/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
"/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
"/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));
MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
"/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");
TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1, UUID.randomUUID().toString(),
TEST_LOCATION, 0, currentTimestamp - 50, 3,
7, ImmutableList.of(TEST_SCHEMA), 5,
ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(), 3, ImmutableList.of(SORT_ORDER_3),
ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
ImmutableList.copyOf(previousMetadataLog));
previousMetadataLog.add(latestPreviousMetadata);
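    // replacing properties writes new metadata; its previous-files log gains the base file and is trimmed to the configured maximum, dropping the oldest entry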
TableMetadata metadata = base.replaceProperties(
ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "5"));
SortedSet<MetadataLogEntry> removedPreviousMetadata =
Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
removedPreviousMetadata.addAll(base.previousFiles());
removedPreviousMetadata.removeAll(metadata.previousFiles());
Assert.assertEquals("Metadata logs should match", previousMetadataLog.subList(1, 6),
metadata.previousFiles());
Assert.assertEquals("Removed Metadata logs should contain 1", previousMetadataLog.subList(0, 1),
ImmutableList.copyOf(removedPreviousMetadata));
}
@Test
public void testAddPreviousMetadataRemoveMultiple() {
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));
List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
long currentTimestamp = System.currentTimeMillis();
List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
"/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
"/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
"/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
"/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
"/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));
MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
"/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");
TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1, UUID.randomUUID().toString(),
TEST_LOCATION, 0, currentTimestamp - 50, 3, 7, ImmutableList.of(TEST_SCHEMA), 2,
ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(),
TableMetadata.INITIAL_SORT_ORDER_ID, ImmutableList.of(SortOrder.unsorted()),
ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
ImmutableList.copyOf(previousMetadataLog));
previousMetadataLog.add(latestPreviousMetadata);
TableMetadata metadata = base.replaceProperties(
ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2"));
SortedSet<MetadataLogEntry> removedPreviousMetadata =
Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
removedPreviousMetadata.addAll(base.previousFiles());
removedPreviousMetadata.removeAll(metadata.previousFiles());
Assert.assertEquals("Metadata logs should match", previousMetadataLog.subList(4, 6),
metadata.previousFiles());
Assert.assertEquals("Removed Metadata logs should contain 4", previousMetadataLog.subList(0, 4),
ImmutableList.copyOf(removedPreviousMetadata));
}
@Test
public void testV2UUIDValidation() {
AssertHelpers.assertThrows("Should reject v2 metadata without a UUID",
IllegalArgumentException.class, "UUID is required in format v2",
() -> new TableMetadata(null, 2, null, TEST_LOCATION, SEQ_NO, System.currentTimeMillis(),
LAST_ASSIGNED_COLUMN_ID, 7, ImmutableList.of(TEST_SCHEMA),
SPEC_5.specId(), ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(),
3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of(), -1L,
ImmutableList.of(), ImmutableList.of(), ImmutableList.of())
);
}
@Test
public void testVersionValidation() {
int unsupportedVersion = TableMetadata.SUPPORTED_TABLE_FORMAT_VERSION + 1;
AssertHelpers.assertThrows("Should reject unsupported metadata",
IllegalArgumentException.class, "Unsupported format version: v" + unsupportedVersion,
() -> new TableMetadata(null, unsupportedVersion, null, TEST_LOCATION, SEQ_NO,
System.currentTimeMillis(), LAST_ASSIGNED_COLUMN_ID,
7, ImmutableList.of(TEST_SCHEMA), SPEC_5.specId(), ImmutableList.of(SPEC_5),
SPEC_5.lastAssignedFieldId(), 3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of(), -1L,
ImmutableList.of(), ImmutableList.of(), ImmutableList.of())
);
}
@Test
public void testParserVersionValidation() throws Exception {
String supportedVersion1 = readTableMetadataInputFile("TableMetadataV1Valid.json");
TableMetadata parsed1 = TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(supportedVersion1, JsonNode.class));
Assert.assertNotNull("Should successfully read supported metadata version", parsed1);
String supportedVersion2 = readTableMetadataInputFile("TableMetadataV2Valid.json");
TableMetadata parsed2 = TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(supportedVersion2, JsonNode.class));
Assert.assertNotNull("Should successfully read supported metadata version", parsed2);
String unsupportedVersion = readTableMetadataInputFile("TableMetadataUnsupportedVersion.json");
AssertHelpers.assertThrows("Should not read unsupported metadata",
IllegalArgumentException.class, "Cannot read unsupported version",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupportedVersion, JsonNode.class))
);
}
@Test
public void testParserV2PartitionSpecsValidation() throws Exception {
String unsupportedVersion = readTableMetadataInputFile("TableMetadataV2MissingPartitionSpecs.json");
AssertHelpers.assertThrows("Should reject v2 metadata without partition specs",
IllegalArgumentException.class, "partition-specs must exist in format v2",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupportedVersion, JsonNode.class))
);
}
@Test
public void testParserV2LastAssignedFieldIdValidation() throws Exception {
String unsupportedVersion = readTableMetadataInputFile("TableMetadataV2MissingLastPartitionId.json");
AssertHelpers.assertThrows("Should reject v2 metadata without last assigned partition field id",
IllegalArgumentException.class, "last-partition-id must exist in format v2",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupportedVersion, JsonNode.class))
);
}
@Test
public void testParserV2SortOrderValidation() throws Exception {
String unsupportedVersion = readTableMetadataInputFile("TableMetadataV2MissingSortOrder.json");
AssertHelpers.assertThrows("Should reject v2 metadata without sort order",
IllegalArgumentException.class, "sort-orders must exist in format v2",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupportedVersion, JsonNode.class))
);
}
@Test
public void testParserV2CurrentSchemaIdValidation() throws Exception {
String unsupported = readTableMetadataInputFile("TableMetadataV2CurrentSchemaNotFound.json");
AssertHelpers.assertThrows("Should reject v2 metadata without valid schema id",
IllegalArgumentException.class, "Cannot find schema with current-schema-id=2 from schemas",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupported, JsonNode.class))
);
}
@Test
public void testParserV2SchemasValidation() throws Exception {
String unsupported = readTableMetadataInputFile("TableMetadataV2MissingSchemas.json");
AssertHelpers.assertThrows("Should reject v2 metadata without schemas",
IllegalArgumentException.class, "schemas must exist in format v2",
() -> TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(unsupported, JsonNode.class))
);
}
private String readTableMetadataInputFile(String fileName) throws Exception {
Path path = Paths.get(getClass().getClassLoader().getResource(fileName).toURI());
return String.join("", java.nio.file.Files.readAllLines(path));
}
@Test
public void testNewTableMetadataReassignmentAllIds() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(3, "x", Types.LongType.get()),
Types.NestedField.required(4, "y", Types.LongType.get()),
Types.NestedField.required(5, "z", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5)
.add(3, 1005, "x_partition", "bucket[4]")
.add(5, 1003, "z_partition", "bucket[8]")
.build();
String location = "file://tmp/db/table";
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, location, ImmutableMap.of());
// newTableMetadata should reassign column ids and partition field ids.
PartitionSpec expected = PartitionSpec.builderFor(metadata.schema()).withSpecId(0)
.add(1, 1000, "x_partition", "bucket[4]")
.add(3, 1001, "z_partition", "bucket[8]")
.build();
Assert.assertEquals(expected, metadata.spec());
}
@Test
public void testInvalidUpdatePartitionSpecForV1Table() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5)
.add(1, 1005, "x_partition", "bucket[4]")
.build();
String location = "file://tmp/db/table";
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, location, ImmutableMap.of());
AssertHelpers.assertThrows("Should fail to update an invalid partition spec",
ValidationException.class, "Spec does not use sequential IDs that are required in v1",
() -> metadata.updatePartitionSpec(spec));
}
@Test
public void testBuildReplacementForV1Table() {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(0)
.identity("x")
.identity("y")
.build();
String location = "file://tmp/db/table";
TableMetadata metadata = TableMetadata.newTableMetadata(
schema, spec, SortOrder.unsorted(), location, ImmutableMap.of(), 1);
Assert.assertEquals(spec, metadata.spec());
Schema updatedSchema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "z", Types.StringType.get()),
Types.NestedField.required(3, "y", Types.LongType.get())
);
PartitionSpec updatedSpec = PartitionSpec.builderFor(updatedSchema).withSpecId(0)
.bucket("z", 8)
.identity("x")
.build();
TableMetadata updated = metadata.buildReplacement(
updatedSchema, updatedSpec, SortOrder.unsorted(), location, ImmutableMap.of());
PartitionSpec expected = PartitionSpec.builderFor(updated.schema()).withSpecId(1)
.add(1, 1000, "x", "identity")
.add(2, 1001, "y", "void")
.add(3, 1002, "z_bucket", "bucket[8]")
.build();
Assert.assertEquals(
"Should reassign the partition field IDs and reuse any existing IDs for equivalent fields",
expected, updated.spec());
}
@Test
public void testBuildReplacementForV2Table() {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(0)
.identity("x")
.identity("y")
.build();
String location = "file://tmp/db/table";
TableMetadata metadata = TableMetadata.newTableMetadata(
schema, spec, SortOrder.unsorted(), location, ImmutableMap.of(), 2);
Assert.assertEquals(spec, metadata.spec());
Schema updatedSchema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "z", Types.StringType.get())
);
PartitionSpec updatedSpec = PartitionSpec.builderFor(updatedSchema).withSpecId(0)
.bucket("z", 8)
.identity("x")
.build();
TableMetadata updated = metadata.buildReplacement(
updatedSchema, updatedSpec, SortOrder.unsorted(), location, ImmutableMap.of());
PartitionSpec expected = PartitionSpec.builderFor(updated.schema()).withSpecId(1)
.add(3, 1002, "z_bucket", "bucket[8]")
.add(1, 1000, "x", "identity")
.build();
Assert.assertEquals(
"Should reassign the partition field IDs and reuse any existing IDs for equivalent fields",
expected, updated.spec());
}
@Test
public void testSortOrder() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
TableMetadata meta = TableMetadata.newTableMetadata(
schema, PartitionSpec.unpartitioned(), null, ImmutableMap.of());
Assert.assertTrue("Should default to unsorted order", meta.sortOrder().isUnsorted());
Assert.assertSame("Should detect identical unsorted order", meta, meta.replaceSortOrder(SortOrder.unsorted()));
}
@Test
public void testUpdateSortOrder() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
SortOrder order = SortOrder.builderFor(schema).asc("x").build();
TableMetadata sortedByX = TableMetadata.newTableMetadata(
schema, PartitionSpec.unpartitioned(), order, null, ImmutableMap.of());
Assert.assertEquals("Should have 1 sort order", 1, sortedByX.sortOrders().size());
Assert.assertEquals("Should use orderId 1", 1, sortedByX.sortOrder().orderId());
Assert.assertEquals("Should be sorted by one field", 1, sortedByX.sortOrder().fields().size());
Assert.assertEquals("Should use the table's field ids", 1, sortedByX.sortOrder().fields().get(0).sourceId());
Assert.assertEquals("Should be ascending",
SortDirection.ASC, sortedByX.sortOrder().fields().get(0).direction());
Assert.assertEquals("Should be nulls first",
NullOrder.NULLS_FIRST, sortedByX.sortOrder().fields().get(0).nullOrder());
// build an equivalent order with the correct schema
SortOrder newOrder = SortOrder.builderFor(sortedByX.schema()).asc("x").build();
TableMetadata alsoSortedByX = sortedByX.replaceSortOrder(newOrder);
Assert.assertSame("Should detect current sortOrder and not update", alsoSortedByX, sortedByX);
TableMetadata unsorted = alsoSortedByX.replaceSortOrder(SortOrder.unsorted());
Assert.assertEquals("Should have 2 sort orders", 2, unsorted.sortOrders().size());
Assert.assertEquals("Should use orderId 0", 0, unsorted.sortOrder().orderId());
Assert.assertTrue("Should be unsorted", unsorted.sortOrder().isUnsorted());
TableMetadata sortedByXDesc = unsorted.replaceSortOrder(SortOrder.builderFor(unsorted.schema()).desc("x").build());
Assert.assertEquals("Should have 3 sort orders", 3, sortedByXDesc.sortOrders().size());
Assert.assertEquals("Should use orderId 2", 2, sortedByXDesc.sortOrder().orderId());
Assert.assertEquals("Should be sorted by one field", 1, sortedByXDesc.sortOrder().fields().size());
Assert.assertEquals("Should use the table's field ids", 1, sortedByXDesc.sortOrder().fields().get(0).sourceId());
Assert.assertEquals("Should be ascending",
SortDirection.DESC, sortedByXDesc.sortOrder().fields().get(0).direction());
Assert.assertEquals("Should be nulls first",
NullOrder.NULLS_FIRST, sortedByX.sortOrder().fields().get(0).nullOrder());
}
@Test
public void testParseSchemaIdentifierFields() throws Exception {
String data = readTableMetadataInputFile("TableMetadataV2Valid.json");
TableMetadata parsed = TableMetadataParser.fromJson(
ops.io(), null, JsonUtil.mapper().readValue(data, JsonNode.class));
Assert.assertEquals(Sets.newHashSet(), parsed.schemasById().get(0).identifierFieldIds());
Assert.assertEquals(Sets.newHashSet(1, 2), parsed.schemasById().get(1).identifierFieldIds());
}
@Test
public void testUpdateSchemaIdentifierFields() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
TableMetadata meta = TableMetadata.newTableMetadata(
schema, PartitionSpec.unpartitioned(), null, ImmutableMap.of());
Schema newSchema = new Schema(
Lists.newArrayList(Types.NestedField.required(1, "x", Types.StringType.get())),
Sets.newHashSet(1)
);
TableMetadata newMeta = meta.updateSchema(newSchema, 1);
Assert.assertEquals(2, newMeta.schemas().size());
Assert.assertEquals(Sets.newHashSet(1), newMeta.schema().identifierFieldIds());
}
@Test
public void testUpdateSchema() {
Schema schema = new Schema(0,
Types.NestedField.required(1, "y", Types.LongType.get(), "comment")
);
TableMetadata freshTable = TableMetadata.newTableMetadata(
schema, PartitionSpec.unpartitioned(), null, ImmutableMap.of());
Assert.assertEquals("Should use TableMetadata.INITIAL_SCHEMA_ID for current schema id",
TableMetadata.INITIAL_SCHEMA_ID, freshTable.currentSchemaId());
assertSameSchemaList(ImmutableList.of(schema), freshTable.schemas());
Assert.assertEquals("Should have expected schema upon return",
schema.asStruct(), freshTable.schema().asStruct());
Assert.assertEquals("Should return expected last column id", 1, freshTable.lastColumnId());
// update schema
Schema schema2 = new Schema(
Types.NestedField.required(1, "y", Types.LongType.get(), "comment"),
Types.NestedField.required(2, "x", Types.StringType.get())
);
TableMetadata twoSchemasTable = freshTable.updateSchema(schema2, 2);
Assert.assertEquals("Should have current schema id as 1",
1, twoSchemasTable.currentSchemaId());
assertSameSchemaList(ImmutableList.of(schema, new Schema(1, schema2.columns())),
twoSchemasTable.schemas());
Assert.assertEquals("Should have expected schema upon return",
schema2.asStruct(), twoSchemasTable.schema().asStruct());
Assert.assertEquals("Should return expected last column id", 2, twoSchemasTable.lastColumnId());
// update schema with the same schema and last column ID as current shouldn't cause change
Schema sameSchema2 = new Schema(
Types.NestedField.required(1, "y", Types.LongType.get(), "comment"),
Types.NestedField.required(2, "x", Types.StringType.get())
);
TableMetadata sameSchemaTable = twoSchemasTable.updateSchema(sameSchema2, 2);
Assert.assertSame("Should return same table metadata",
twoSchemasTable, sameSchemaTable);
// update schema with the same schema and different last column ID as current should create a new table
TableMetadata differentColumnIdTable = sameSchemaTable.updateSchema(sameSchema2, 3);
Assert.assertEquals("Should have current schema id as 1",
1, differentColumnIdTable.currentSchemaId());
assertSameSchemaList(ImmutableList.of(schema, new Schema(1, schema2.columns())),
differentColumnIdTable.schemas());
Assert.assertEquals("Should have expected schema upon return",
schema2.asStruct(), differentColumnIdTable.schema().asStruct());
Assert.assertEquals("Should return expected last column id",
3, differentColumnIdTable.lastColumnId());
// update schema with old schema does not change schemas
TableMetadata revertSchemaTable = differentColumnIdTable.updateSchema(schema, 3);
Assert.assertEquals("Should have current schema id as 0",
0, revertSchemaTable.currentSchemaId());
assertSameSchemaList(ImmutableList.of(schema, new Schema(1, schema2.columns())),
revertSchemaTable.schemas());
Assert.assertEquals("Should have expected schema upon return",
schema.asStruct(), revertSchemaTable.schema().asStruct());
Assert.assertEquals("Should return expected last column id",
3, revertSchemaTable.lastColumnId());
// create new schema will use the largest schema id + 1
Schema schema3 = new Schema(
Types.NestedField.required(2, "y", Types.LongType.get(), "comment"),
Types.NestedField.required(4, "x", Types.StringType.get()),
Types.NestedField.required(6, "z", Types.IntegerType.get())
);
TableMetadata threeSchemaTable = revertSchemaTable.updateSchema(schema3, 6);
Assert.assertEquals("Should have current schema id as 2",
2, threeSchemaTable.currentSchemaId());
assertSameSchemaList(ImmutableList.of(schema,
new Schema(1, schema2.columns()),
new Schema(2, schema3.columns())), threeSchemaTable.schemas());
Assert.assertEquals("Should have expected schema upon return",
schema3.asStruct(), threeSchemaTable.schema().asStruct());
Assert.assertEquals("Should return expected last column id",
6, threeSchemaTable.lastColumnId());
}
@Test
public void testCreateV2MetadataThroughTableProperty() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
TableMetadata meta = TableMetadata.newTableMetadata(schema, PartitionSpec.unpartitioned(), null,
ImmutableMap.of(TableProperties.FORMAT_VERSION, "2", "key", "val"));
Assert.assertEquals("format version should be configured based on the format-version key",
2, meta.formatVersion());
Assert.assertEquals("should not contain format-version in properties",
ImmutableMap.of("key", "val"), meta.properties());
}
@Test
public void testReplaceV1MetadataToV2ThroughTableProperty() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
TableMetadata meta = TableMetadata.newTableMetadata(schema, PartitionSpec.unpartitioned(), null,
ImmutableMap.of(TableProperties.FORMAT_VERSION, "1", "key", "val"));
meta = meta.buildReplacement(meta.schema(), meta.spec(), meta.sortOrder(), meta.location(),
ImmutableMap.of(TableProperties.FORMAT_VERSION, "2", "key2", "val2"));
Assert.assertEquals("format version should be configured based on the format-version key",
2, meta.formatVersion());
Assert.assertEquals("should not contain format-version but should contain old and new properties",
ImmutableMap.of("key", "val", "key2", "val2"), meta.properties());
}
@Test
public void testUpgradeV1MetadataToV2ThroughTableProperty() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
TableMetadata meta = TableMetadata.newTableMetadata(schema, PartitionSpec.unpartitioned(), null,
ImmutableMap.of(TableProperties.FORMAT_VERSION, "1", "key", "val"));
meta = meta.replaceProperties(ImmutableMap.of(TableProperties.FORMAT_VERSION,
"2", "key2", "val2"));
Assert.assertEquals("format version should be configured based on the format-version key",
2, meta.formatVersion());
Assert.assertEquals("should not contain format-version but should contain new properties",
ImmutableMap.of("key2", "val2"), meta.properties());
}
@Test
public void testNoReservedPropertyForTableMetadataCreation() {
Schema schema = new Schema(
Types.NestedField.required(10, "x", Types.StringType.get())
);
AssertHelpers.assertThrows("should not allow reserved table property when creating table metadata",
IllegalArgumentException.class,
"Table properties should not contain reserved properties, but got {format-version=1}",
() -> TableMetadata.newTableMetadata(schema, PartitionSpec.unpartitioned(), null, "/tmp",
ImmutableMap.of(TableProperties.FORMAT_VERSION, "1"), 1));
}
}
| 1 | 39,147 | Are these required? | apache-iceberg | java |
@@ -544,6 +544,13 @@ func (c *client) initClient() {
// Snapshots to avoid mutex access in fast paths.
c.out.wdl = opts.WriteDeadline
c.out.mp = opts.MaxPending
+ // Snapshot max control line since currently can not be changed on reload and we
+ // were checking it on each call to parse. If this changes and we allow MaxControlLine
+ // to be reloaded without restart, this code will need to change.
+ c.mcl = int32(opts.MaxControlLine)
+ if c.mcl == 0 {
+ c.mcl = MAX_CONTROL_LINE_SIZE
+ }
c.subs = make(map[string]*subscription)
c.echo = true | 1 | // Copyright 2012-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
)
// Type of client connection.
const (
// CLIENT is an end user.
CLIENT = iota
// ROUTER represents another server in the cluster.
ROUTER
// GATEWAY is a link between 2 clusters.
GATEWAY
// SYSTEM is an internal system client.
SYSTEM
// LEAF is for leaf node connections.
LEAF
// JETSTREAM is an internal jetstream client.
JETSTREAM
// ACCOUNT is for the internal client for accounts.
ACCOUNT
)
// Extended type of a CLIENT connection. This is returned by c.clientType()
// and indicates what type of client connection we are dealing with.
// If invoked on a non CLIENT connection, NON_CLIENT type is returned.
const (
// If the connection is not a CLIENT connection.
NON_CLIENT = iota
// Regular NATS client.
NATS
// MQTT client.
MQTT
// Websocket client.
WS
)
const (
// ClientProtoZero is the original Client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
ClientProtoZero = iota
// ClientProtoInfo signals a client can receive more than the original INFO block.
// This can be used to update clients on other cluster members, etc.
ClientProtoInfo
)
const (
pingProto = "PING" + _CRLF_
pongProto = "PONG" + _CRLF_
errProto = "-ERR '%s'" + _CRLF_
okProto = "+OK" + _CRLF_
)
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
// Scratch buffer size for the processMsg() calls.
msgScratchSize = 1024
msgHeadProto = "RMSG "
msgHeadProtoLen = len(msgHeadProto)
// For controlling dynamic buffer sizes.
startBufSize = 512 // For INFO/CONNECT block
minBufSize = 64 // Smallest to shrink to for PING/PONG
maxBufSize = 65536 // 64k
shortsToShrink = 2 // Trigger to shrink dynamic buffers
maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop
readLoopReport = 2 * time.Second
// Server should not send a PING (for RTT) before the first PONG has
// been sent to the client. However, in case some client libs don't
// send CONNECT+PING, cap the maximum time before server can send
// the RTT PING.
maxNoRTTPingBeforeFirstPong = 2 * time.Second
// For stalling fast producers
stallClientMinDuration = 100 * time.Millisecond
stallClientMaxDuration = time.Second
)
var readLoopReportThreshold = readLoopReport
// Represent client booleans with a bitmask
type clientFlag uint16
// Some client state represented as flags
const (
connectReceived clientFlag = 1 << iota // The CONNECT proto has been received
infoReceived // The INFO protocol has been received
firstPongSent // The first PONG has been sent
handshakeComplete // For TLS clients, indicate that the handshake is complete
flushOutbound // Marks client as having a flushOutbound call in progress.
noReconnect // Indicate that on close, this connection should not attempt a reconnect
closeConnection // Marks that closeConnection has already been called.
connMarkedClosed // Marks that markConnAsClosed has already been called.
writeLoopStarted // Marks that the writeLoop has been started.
skipFlushOnClose // Marks that flushOutbound() should not be called on connection close.
expectConnect // Marks if this connection is expected to send a CONNECT
)
// set the flag (would be equivalent to set the boolean to true)
func (cf *clientFlag) set(c clientFlag) {
*cf |= c
}
// clear the flag (would be equivalent to set the boolean to false)
func (cf *clientFlag) clear(c clientFlag) {
*cf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (cf clientFlag) isSet(c clientFlag) bool {
return cf&c != 0
}
// setIfNotSet will set the flag `c` only if that flag was not already
// set and return true to indicate that the flag has been set. Returns
// false otherwise.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
if *cf&c == 0 {
*cf |= c
return true
}
return false
}
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int
const (
ClientClosed = ClosedState(iota + 1)
AuthenticationTimeout
AuthenticationViolation
TLSHandshakeError
SlowConsumerPendingBytes
SlowConsumerWriteDeadline
WriteError
ReadError
ParseError
StaleConnection
ProtocolViolation
BadClientProtocolVersion
WrongPort
MaxAccountConnectionsExceeded
MaxConnectionsExceeded
MaxPayloadExceeded
MaxControlLineExceeded
MaxSubscriptionsExceeded
DuplicateRoute
RouteRemoved
ServerShutdown
AuthenticationExpired
WrongGateway
MissingAccount
Revocation
InternalClient
MsgHeaderViolation
NoRespondersRequiresHeaders
ClusterNameConflict
DuplicateRemoteLeafnodeConnection
DuplicateClientID
)
// Some flags passed to processMsgResults
const pmrNoFlag int = 0
const (
pmrCollectQueueNames int = 1 << iota
pmrIgnoreEmptyQueueFilter
pmrAllowSendFromRouteToRoute
pmrMsgImportedFromService
)
type client struct {
// Here first because of use of atomics, and memory alignment.
stats
// Indicate if we should check gwrm or not. Since checking gwrm is done
// when processing inbound messages and requires the lock we want to
// check only when needed. This is set/get using atomic, so needs to
// be memory aligned.
cgwrt int32
kind int
srv *Server
acc *Account
perms *permissions
in readCache
parseState
opts clientOpts
rrTracking *rrTracking
mpay int32
msubs int32
mcl int32
mu sync.Mutex
cid uint64
start time.Time
nonce []byte
pubKey string
nc net.Conn
ncs atomic.Value
out outbound
user *NkeyUser
host string
port uint16
subs map[string]*subscription
replies map[string]*resp
mperms *msgDeny
darray []string
pcd map[*client]struct{}
atmr *time.Timer
ping pinfo
msgb [msgScratchSize]byte
last time.Time
headers bool
rtt time.Duration
rttStart time.Time
route *route
gw *gateway
leaf *leaf
ws *websocket
mqtt *mqtt
// To keep track of gateway replies mapping
gwrm map[string]*gwReplyMap
flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
rref byte
trace bool
echo bool
tags jwt.TagList
nameTag string
}
type rrTracking struct {
rmap map[string]*remoteLatency
ptmr *time.Timer
lrt time.Duration
}
// Struct for PING initiation from the server.
type pinfo struct {
tmr *time.Timer
last time.Time
out int
}
// outbound holds pending data for a socket.
type outbound struct {
p []byte // Primary write buffer
s []byte // Secondary for use post flush
nb net.Buffers // net.Buffers for writev IO
sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max.
sws int32 // Number of short writes, used for dynamic resizing.
pb int64 // Total pending/queued bytes.
pm int32 // Total pending/queued messages.
fsp int32 // Flush signals that are pending per producer from readLoop's pcd.
sg *sync.Cond // To signal writeLoop that there is data to flush.
wdl time.Duration // Snapshot of write deadline.
mp int64 // Snapshot of max pending for client.
lft time.Duration // Last flush time for Write.
stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in.
}
type perm struct {
allow *Sublist
deny *Sublist
}
type permissions struct {
sub perm
pub perm
resp *ResponsePermission
pcache map[string]bool
}
// This is used to dynamically track responses and reply subjects
// for dynamic permissioning.
type resp struct {
t time.Time
n int
}
// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
deny *Sublist
dcache map[string]bool
}
// routeTarget collects information regarding routes and queue groups for
// sending information to a remote.
type routeTarget struct {
sub *subscription
qs []byte
_qs [32]byte
}
const (
maxResultCacheSize = 512
maxDenyPermCacheSize = 256
maxPermCacheSize = 128
pruneSize = 32
routeTargetInit = 8
replyPermLimit = 4096
)
// Represent read cache booleans with a bitmask
type readCacheFlag uint16
const (
hasMappings readCacheFlag = 1 << iota // For account subject mappings.
)
// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
// These are for clients who are bound to a single account.
genid uint64
results map[string]*SublistResult
// This is for routes and gateways to have their own L1 as well that is account aware.
pacache map[string]*perAccountCache
// This is for when we deliver messages across a route. We use this structure
// to make sure to only send one message and properly scope to queues as needed.
rts []routeTarget
prand *rand.Rand
// These are all temporary totals for an invocation of a read in readloop.
msgs int32
bytes int32
subs int32
rsz int32 // Read buffer size
srs int32 // Short reads, used for dynamic buffer resizing.
// These are for readcache flags to avoid locks.
flags readCacheFlag
}
// set the flag (would be equivalent to set the boolean to true)
func (rcf *readCacheFlag) set(c readCacheFlag) {
*rcf |= c
}
// clear the flag (would be equivalent to set the boolean to false)
func (rcf *readCacheFlag) clear(c readCacheFlag) {
*rcf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (rcf readCacheFlag) isSet(c readCacheFlag) bool {
return rcf&c != 0
}
const (
defaultMaxPerAccountCacheSize = 4096
defaultPrunePerAccountCacheSize = 256
defaultClosedSubsCheckInterval = 5 * time.Minute
)
var (
maxPerAccountCacheSize = defaultMaxPerAccountCacheSize
prunePerAccountCacheSize = defaultPrunePerAccountCacheSize
closedSubsCheckInterval = defaultClosedSubsCheckInterval
)
// perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients.
type perAccountCache struct {
acc *Account
results *SublistResult
genid uint64
}
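// String returns the cached string representation of the connection
// (stored in ncs), or an empty string if it has not been set yet.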
func (c *client) String() (id string) {
loaded := c.ncs.Load()
if loaded != nil {
return loaded.(string)
}
return ""
}
// GetName returns the application supplied name for the connection.
func (c *client) GetName() string {
c.mu.Lock()
name := c.opts.Name
c.mu.Unlock()
return name
}
// GetOpts returns the client options provided by the application.
func (c *client) GetOpts() *clientOpts {
return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
tc, ok := c.nc.(*tls.Conn)
if !ok {
return nil
}
state := tc.ConnectionState()
return &state
}
// For CLIENT connections, this function returns the client type, that is,
// NATS (for regular clients), MQTT or WS for websocket.
// If this is invoked for a non CLIENT connection, NON_CLIENT is returned.
//
// This function does not lock the client and accesses fields that are supposed
// to be immutable and therefore it can be invoked outside of the client's lock.
func (c *client) clientType() int {
switch c.kind {
case CLIENT:
if c.isMqtt() {
return MQTT
} else if c.isWebsocket() {
return WS
}
return NATS
default:
return NON_CLIENT
}
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
client *client
im *streamImport // This is for import stream support.
shadow []*subscription // This is to track shadowed accounts.
icb msgHandler
subject []byte
queue []byte
sid []byte
origin []byte
nm int64
max int64
qw int32
closed int32
mqtt *mqttSub
}
// Indicate that this subscription is closed.
// This is used in pruning of route and gateway cache items.
func (s *subscription) close() {
atomic.StoreInt32(&s.closed, 1)
}
// Return true if this subscription was unsubscribed
// or its connection has been closed.
func (s *subscription) isClosed() bool {
return atomic.LoadInt32(&s.closed) == 1
}
type clientOpts struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
TLSRequired bool `json:"tls_required"`
Nkey string `json:"nkey,omitempty"`
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
Token string `json:"auth_token,omitempty"`
Username string `json:"user,omitempty"`
Password string `json:"pass,omitempty"`
Name string `json:"name"`
Lang string `json:"lang"`
Version string `json:"version"`
Protocol int `json:"protocol"`
Account string `json:"account,omitempty"`
AccountNew bool `json:"new_account,omitempty"`
Headers bool `json:"headers,omitempty"`
NoResponders bool `json:"no_responders,omitempty"`
// Routes and Leafnodes only
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
}
var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true}
var internalOpts = clientOpts{Verbose: false, Pedantic: false, Echo: false}
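// setTraceLevel snapshots whether protocol tracing is enabled for this client
// from the server's logging options. SYSTEM clients are traced only when
// system account tracing is enabled.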
func (c *client) setTraceLevel() {
if c.kind == SYSTEM && !(atomic.LoadInt32(&c.srv.logging.traceSysAcc) != 0) {
c.trace = false
} else {
c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
}
}
// Lock should be held
func (c *client) initClient() {
s := c.srv
c.cid = atomic.AddUint64(&s.gcid, 1)
// Outbound data structure setup
c.out.sz = startBufSize
c.out.sg = sync.NewCond(&(c.mu))
opts := s.getOpts()
// Snapshots to avoid mutex access in fast paths.
c.out.wdl = opts.WriteDeadline
c.out.mp = opts.MaxPending
c.subs = make(map[string]*subscription)
c.echo = true
c.setTraceLevel()
// This is a scratch buffer used for processMsg()
// The msg header starts with "RMSG ", which can be used
// for both local and routes.
// in bytes that is [82 77 83 71 32].
c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
// This is to track pending clients that have data to be flushed
// after we process inbound msgs from our own connection.
c.pcd = make(map[*client]struct{})
// snapshot the string version of the connection
var conn string
if c.nc != nil {
if addr := c.nc.RemoteAddr(); addr != nil {
if conn = addr.String(); conn != _EMPTY_ {
host, port, _ := net.SplitHostPort(conn)
iPort, _ := strconv.Atoi(port)
c.host, c.port = host, uint16(iPort)
// Now that we have extracted host and port, escape
// the string because it is going to be used in Sprintf
conn = strings.ReplaceAll(conn, "%", "%%")
}
}
}
switch c.kind {
case CLIENT:
switch c.clientType() {
case NATS:
c.ncs.Store(fmt.Sprintf("%s - cid:%d", conn, c.cid))
case WS:
c.ncs.Store(fmt.Sprintf("%s - wid:%d", conn, c.cid))
case MQTT:
c.ncs.Store(fmt.Sprintf("%s - mid:%d", conn, c.cid))
}
case ROUTER:
c.ncs.Store(fmt.Sprintf("%s - rid:%d", conn, c.cid))
case GATEWAY:
c.ncs.Store(fmt.Sprintf("%s - gid:%d", conn, c.cid))
case LEAF:
c.ncs.Store(fmt.Sprintf("%s - lid:%d", conn, c.cid))
case SYSTEM:
c.ncs.Store("SYSTEM")
case JETSTREAM:
c.ncs.Store("JETSTREAM")
case ACCOUNT:
c.ncs.Store("ACCOUNT")
}
}
// RemoteAddress exposes the Address of the client connection,
// nil when not connected or unknown
func (c *client) RemoteAddress() net.Addr {
c.mu.Lock()
defer c.mu.Unlock()
if c.nc == nil {
return nil
}
return c.nc.RemoteAddr()
}
// Helper function to report errors.
func (c *client) reportErrRegisterAccount(acc *Account, err error) {
if err == ErrTooManyAccountConnections {
c.maxAccountConnExceeded()
return
}
c.Errorf("Problem registering with account [%s]", acc.Name)
c.sendErr("Failed Account Registration")
}
// registerWithAccount will register the given user with a specific
// account. This will change the subject namespace.
func (c *client) registerWithAccount(acc *Account) error {
if acc == nil || acc.sl == nil {
return ErrBadAccount
}
// If we were previously registered, usually to $G, do accounting here to remove.
if c.acc != nil {
if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
c.srv.decActiveAccounts()
}
}
c.mu.Lock()
kind := c.kind
srv := c.srv
c.acc = acc
c.applyAccountLimits()
c.mu.Unlock()
// Check if we have a max connections violation
if kind == CLIENT && acc.MaxTotalConnectionsReached() {
return ErrTooManyAccountConnections
} else if kind == LEAF && acc.MaxTotalLeafNodesReached() {
return ErrTooManyAccountConnections
}
// Add in new one.
if prev := acc.addClient(c); prev == 0 && srv != nil {
srv.incActiveAccounts()
}
return nil
}
// Helper to determine if we have met or exceeded max subs.
func (c *client) subsAtLimit() bool {
return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs)
}
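// minLimit lowers *value to limit when limit is set (not jwt.NoLimit) and is
// more restrictive than the current value. Returns true if *value was changed.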
func minLimit(value *int32, limit int32) bool {
if *value != jwt.NoLimit {
if limit != jwt.NoLimit {
if limit < *value {
*value = limit
return true
}
}
} else if limit != jwt.NoLimit {
*value = limit
return true
}
return false
}
// Apply account limits
// Lock is held on entry.
// FIXME(dlc) - Should server be able to override here?
func (c *client) applyAccountLimits() {
if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) {
return
}
c.mpay = jwt.NoLimit
c.msubs = jwt.NoLimit
if c.opts.JWT != "" { // user jwt implies account
if uc, _ := jwt.DecodeUserClaims(c.opts.JWT); uc != nil {
c.mpay = int32(uc.Limits.Payload)
c.msubs = int32(uc.Limits.Subs)
if uc.IssuerAccount != _EMPTY_ && uc.IssuerAccount != uc.Issuer {
if scope, ok := c.acc.signingKeys[uc.Issuer]; ok {
if userScope, ok := scope.(*jwt.UserScope); ok {
// if signing key disappeared or changed and we don't get here, the client will be disconnected
c.mpay = int32(userScope.Template.Limits.Payload)
c.msubs = int32(userScope.Template.Limits.Subs)
}
}
}
}
}
minLimit(&c.mpay, c.acc.mpay)
minLimit(&c.msubs, c.acc.msubs)
s := c.srv
opts := s.getOpts()
mPay := opts.MaxPayload
// options encode unlimited differently
if mPay == 0 {
mPay = jwt.NoLimit
}
mSubs := int32(opts.MaxSubs)
if mSubs == 0 {
mSubs = jwt.NoLimit
}
wasUnlimited := c.mpay == jwt.NoLimit
if minLimit(&c.mpay, mPay) && !wasUnlimited {
c.Errorf("Max Payload set to %d from server overrides account or user config", opts.MaxPayload)
}
wasUnlimited = c.msubs == jwt.NoLimit
if minLimit(&c.msubs, mSubs) && !wasUnlimited {
c.Errorf("Max Subscriptions set to %d from server overrides account or user config", opts.MaxSubs)
}
if c.subsAtLimit() {
go func() {
c.maxSubsExceeded()
time.Sleep(20 * time.Millisecond)
c.closeConnection(MaxSubscriptionsExceeded)
}()
}
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map
// any permissions into the client and set up accounts.
func (c *client) RegisterUser(user *User) {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return
}
}
c.mu.Lock()
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and set up accounts.
func (c *client) RegisterNkeyUser(user *NkeyUser) error {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return err
}
}
c.mu.Lock()
c.user = user
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
return nil
}
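// splitSubjectQueue splits a subscribe permission entry of the form
// "<subject> [queue]" into its subject and optional queue group parts.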
func splitSubjectQueue(sq string) ([]byte, []byte, error) {
vals := strings.Fields(strings.TrimSpace(sq))
s := []byte(vals[0])
var q []byte
if len(vals) == 2 {
q = []byte(vals[1])
} else if len(vals) > 2 {
return nil, nil, fmt.Errorf("invalid subject-queue %q", sq)
}
return s, q, nil
}
// Initializes client.perms structure.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
if perms == nil {
return
}
c.perms = &permissions{}
c.perms.pcache = make(map[string]bool)
// Loop over publish permissions
if perms.Publish != nil {
if perms.Publish.Allow != nil {
c.perms.pub.allow = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Allow {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.allow.Insert(sub)
}
if len(perms.Publish.Deny) > 0 {
c.perms.pub.deny = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Deny {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Check if we are allowed to send responses.
if perms.Response != nil {
rp := *perms.Response
c.perms.resp = &rp
c.replies = make(map[string]*resp)
}
// Loop over subscribe permissions
if perms.Subscribe != nil {
var err error
if len(perms.Subscribe.Allow) > 0 {
c.perms.sub.allow = NewSublistWithCache()
}
for _, subSubject := range perms.Subscribe.Allow {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.allow.Insert(sub)
}
if len(perms.Subscribe.Deny) > 0 {
c.perms.sub.deny = NewSublistWithCache()
// Also hold onto this array for later.
c.darray = perms.Subscribe.Deny
}
for _, subSubject := range perms.Subscribe.Deny {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.deny.Insert(sub)
}
}
// If we are a leafnode and we are the hub copy the extracted perms
// to resend back to soliciting server. These are reversed from the
// way routes interpret them since this is how the soliciting server
// will receive these back in an update INFO.
if c.isHubLeafNode() {
c.opts.Import = perms.Subscribe
c.opts.Export = perms.Publish
}
}
// Check to see if we have an expiration for the user JWT via base claims.
// FIXME(dlc) - Clear on connect with new JWT.
func (c *client) setExpiration(claims *jwt.ClaimsData, validFor time.Duration) {
if claims.Expires == 0 {
if validFor != 0 {
c.setExpirationTimer(validFor)
}
return
}
expiresAt := time.Duration(0)
tn := time.Now().Unix()
if claims.Expires > tn {
expiresAt = time.Duration(claims.Expires-tn) * time.Second
}
if validFor != 0 && validFor < expiresAt {
c.setExpirationTimer(validFor)
} else {
c.setExpirationTimer(expiresAt)
}
}
// This will load up the deny structure used for filtering delivered
// messages based on a deny clause for subscriptions.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)}
for _, sub := range c.darray {
c.mperms.deny.Insert(&subscription{subject: []byte(sub)})
}
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine.
func (c *client) writeLoop() {
defer c.srv.grWG.Done()
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.flags.set(writeLoopStarted)
c.mu.Unlock()
// Used to check that we did flush from last wake up.
waitOk := true
var close bool
// Main loop. Will wait to be signaled and then will use
// buffered outbound structure for efficient writev to the underlying socket.
for {
c.mu.Lock()
if close = c.isClosed(); !close {
owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending
if waitOk && (c.out.pb == 0 || owtf) {
c.out.sg.Wait()
// Check that connection has not been closed while lock was released
// in the conditional wait.
close = c.isClosed()
}
}
if close {
c.flushAndClose(false)
c.mu.Unlock()
// We should always call closeConnection() to ensure that state is
// properly cleaned-up. It will be a no-op if already done.
c.closeConnection(WriteError)
// Now explicitly call reconnect(). Thanks to ref counting, we know
// that the reconnect will execute only after connection has been
// removed from the server state.
c.reconnect()
return
}
// Flush data
waitOk = c.flushOutbound()
c.mu.Unlock()
}
}
// flushClients will make sure to flush any clients we may have
// sent to during processing. We pass in a budget as a time.Duration
// for how much time to spend in place flushing for this client. This
// will normally be called in the readLoop of the client who sent the
// message that now is being delivered.
func (c *client) flushClients(budget time.Duration) time.Time {
last := time.Now()
// Check pending clients for flush.
for cp := range c.pcd {
// TODO(dlc) - Wonder if it makes more sense to create a new map?
delete(c.pcd, cp)
// Queue up a flush for those in the set
cp.mu.Lock()
// Update last activity for message delivery
cp.last = last
// Remove ourselves from the pending list.
cp.out.fsp--
// Just ignore if this was closed.
if cp.isClosed() {
cp.mu.Unlock()
continue
}
if budget > 0 && cp.out.lft < 2*budget && cp.flushOutbound() {
budget -= cp.out.lft
} else {
cp.flushSignal()
}
cp.mu.Unlock()
}
return last
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine.
func (c *client) readLoop(pre []byte) {
// Grab the connection off the client, it will be cleared on a close.
// We check for that after the loop, but want to avoid a nil dereference
c.mu.Lock()
s := c.srv
defer s.grWG.Done()
if c.isClosed() {
c.mu.Unlock()
return
}
nc := c.nc
ws := c.isWebsocket()
if c.isMqtt() {
c.mqtt.r = &mqttReader{reader: nc}
}
c.in.rsz = startBufSize
// Snapshot max control line since currently can not be changed on reload and we
// were checking it on each call to parse. If this changes and we allow MaxControlLine
// to be reloaded without restart, this code will need to change.
c.mcl = MAX_CONTROL_LINE_SIZE
if s != nil {
if opts := s.getOpts(); opts != nil {
c.mcl = int32(opts.MaxControlLine)
}
}
// Check the per-account-cache for closed subscriptions
cpacc := c.kind == ROUTER || c.kind == GATEWAY
// Last per-account-cache check for closed subscriptions
lpacc := time.Now()
acc := c.acc
c.mu.Unlock()
defer func() {
if c.isMqtt() {
s.mqttHandleWill(c)
}
// These are used only in the readloop, so we can set them to nil
// on exit of the readLoop.
c.in.results, c.in.pacache = nil, nil
}()
// Start read buffer.
b := make([]byte, c.in.rsz)
// Websocket clients will return several slices if there are multiple
// websocket frames in the blind read. For non WS clients though, we
// will always have 1 slice per loop iteration. So we define this here
// so non WS clients will use bufs[0] = b[:n].
var _bufs [1][]byte
bufs := _bufs[:1]
var wsr *wsReadInfo
if ws {
wsr = &wsReadInfo{}
wsr.init()
}
// If we have a pre buffer parse that first.
if len(pre) > 0 {
c.parse(pre)
}
for {
n, err := nc.Read(b)
// If we have any data we will try to parse and exit at the end.
if n == 0 && err != nil {
c.closeConnection(closedStateForErr(err))
return
}
if ws {
bufs, err = c.wsRead(wsr, nc, b[:n])
if bufs == nil && err != nil {
if err != io.EOF {
c.Errorf("read error: %v", err)
}
c.closeConnection(closedStateForErr(err))
} else if bufs == nil {
continue
}
} else {
bufs[0] = b[:n]
}
start := time.Now()
// Check if the account has mappings and if so set the local readcache flag.
// We check here to make sure any changes such as config reload are reflected here.
if c.kind == CLIENT {
if acc.hasMappings() {
c.in.flags.set(hasMappings)
} else {
c.in.flags.clear(hasMappings)
}
}
// Clear inbound stats cache
c.in.msgs = 0
c.in.bytes = 0
c.in.subs = 0
// Main call into parser for inbound data. This will generate callouts
// to process messages, etc.
for i := 0; i < len(bufs); i++ {
if err := c.parse(bufs[i]); err != nil {
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Need to call flushClients because some of the clients have been
// assigned messages and their "fsp" incremented, and need now to be
// decremented and their writeLoop signaled.
c.flushClients(0)
// handled inline
if err != ErrMaxPayload && err != ErrAuthentication {
c.Error(err)
c.closeConnection(ProtocolViolation)
}
return
}
}
// Updates stats for client and server that were collected
// from parsing through the buffer.
if c.in.msgs > 0 {
atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
}
// Budget to spend in place flushing outbound data.
// Client will be checked on several fronts to see
// if applicable. Routes and Gateways will never
// spend time flushing outbound in place.
var budget time.Duration
if c.kind == CLIENT {
budget = time.Millisecond
}
// Flush, or signal to writeLoop to flush to socket.
last := c.flushClients(budget)
// Update activity, check read buffer size.
c.mu.Lock()
// Activity based on interest changes or data/msgs.
if c.in.msgs > 0 || c.in.subs > 0 {
c.last = last
}
if n >= cap(b) {
c.in.srs = 0
} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
c.in.srs++
}
// Update read buffer size as/if needed.
if n >= cap(b) && cap(b) < maxBufSize {
// Grow
c.in.rsz = int32(cap(b) * 2)
b = make([]byte, c.in.rsz)
} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
c.in.rsz = int32(cap(b) / 2)
b = make([]byte, c.in.rsz)
}
// re-snapshot the account since it can change during reload, etc.
acc = c.acc
c.mu.Unlock()
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// We could have had a read error from above but still read some data.
// If so do the close here unconditionally.
if err != nil {
c.closeConnection(closedStateForErr(err))
return
}
if cpacc && (start.Sub(lpacc)) >= closedSubsCheckInterval {
c.pruneClosedSubFromPerAccountCache()
lpacc = time.Now()
}
}
}
// Returns the appropriate closed state for a given read error.
func closedStateForErr(err error) ClosedState {
if err == io.EOF {
return ClientClosed
}
return ReadError
}
// collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo.
// This will return a copy on purpose.
func (c *client) collapsePtoNB() (net.Buffers, int64) {
if c.isWebsocket() {
return c.wsCollapsePtoNB()
}
if c.out.p != nil {
p := c.out.p
c.out.p = nil
return append(c.out.nb, p), c.out.pb
}
return c.out.nb, c.out.pb
}
// This will handle the fixup needed on a partial write.
// Assume pending has already been calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
if c.isWebsocket() {
c.ws.frames = append(pnb, c.ws.frames...)
return
}
nb, _ := c.collapsePtoNB()
// The partial needs to be first, so append nb to pnb
c.out.nb = append(pnb, nb...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return true if data was attempted to be written.
// Lock must be held
func (c *client) flushOutbound() bool {
if c.flags.isSet(flushOutbound) {
// For CLIENT connections, it is possible that the readLoop calls
// flushOutbound(). If writeLoop and readLoop compete and we are
// here we should release the lock to reduce the risk of spinning.
c.mu.Unlock()
runtime.Gosched()
c.mu.Lock()
return false
}
c.flags.set(flushOutbound)
defer c.flags.clear(flushOutbound)
// Check for nothing to do.
if c.nc == nil || c.srv == nil || c.out.pb == 0 {
return true // true because no need to queue a signal.
}
// Place primary on nb, assign primary to secondary, nil out nb and secondary.
nb, attempted := c.collapsePtoNB()
c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil
if nb == nil {
return true
}
// For selecting primary replacement.
cnb := nb
var lfs int
if len(cnb) > 0 {
lfs = len(cnb[0])
}
// In case it goes away after releasing the lock.
nc := c.nc
apm := c.out.pm
// Capture this (we change the value in some tests)
wdl := c.out.wdl
// Do NOT hold lock during actual IO.
c.mu.Unlock()
// flush here
start := time.Now()
// FIXME(dlc) - writev will do multiple IOs past 1024 on
// most platforms, need to account for that with deadline?
nc.SetWriteDeadline(start.Add(wdl))
// Actual write to the socket.
n, err := nb.WriteTo(nc)
nc.SetWriteDeadline(time.Time{})
lft := time.Since(start)
// Re-acquire client lock.
c.mu.Lock()
// Ignore ErrShortWrite errors, they will be handled as partials.
if err != nil && err != io.ErrShortWrite {
// Handle timeout error (slow consumer) differently
if ne, ok := err.(net.Error); ok && ne.Timeout() {
if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed {
return true
}
} else {
// Other errors will cause connection to be closed.
// For clients, report as debug but for others report as error.
report := c.Debugf
if c.kind != CLIENT {
report = c.Errorf
}
report("Error flushing: %v", err)
c.markConnAsClosed(WriteError)
return true
}
}
// Update flush time statistics.
c.out.lft = lft
// Subtract from pending bytes and messages.
c.out.pb -= n
if c.isWebsocket() {
c.ws.fs -= n
}
c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials.
// Check for partial writes
// TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin.
if n != attempted && n > 0 {
c.handlePartialWrite(nb)
} else if int32(n) >= c.out.sz {
c.out.sws = 0
}
// Adjust based on what we wrote plus any pending.
pt := n + c.out.pb
// Adjust sz as needed downward, keeping power of 2.
// We do this at a slower rate.
if pt < int64(c.out.sz) && c.out.sz > minBufSize {
c.out.sws++
if c.out.sws > shortsToShrink {
c.out.sz >>= 1
}
}
// Adjust sz as needed upward, keeping power of 2.
if pt > int64(c.out.sz) && c.out.sz < maxBufSize {
c.out.sz <<= 1
}
// Check to see if we can reuse buffers.
if lfs != 0 && n >= int64(lfs) {
oldp := cnb[0][:0]
if cap(oldp) >= int(c.out.sz) {
// Replace primary or secondary if they are nil, reusing same buffer.
if c.out.p == nil {
c.out.p = oldp
} else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) {
c.out.s = oldp
}
}
}
// Check that if there is still data to send and writeLoop is in wait,
// then we need to signal.
if c.out.pb > 0 {
c.flushSignal()
}
// Check if we have a stalled gate and if so and we are recovering release
// any stalled producers. Only kind==CLIENT will stall.
if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/2) {
close(c.out.stc)
c.out.stc = nil
}
return true
}
// This is invoked from flushOutbound() for io/timeout error (slow consumer).
// Returns a boolean to indicate if the connection has been closed or not.
// Lock is held on entry.
func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool {
if tlsConn, ok := c.nc.(*tls.Conn); ok {
if !tlsConn.ConnectionState().HandshakeComplete {
// Likely a TLSTimeout error instead...
c.markConnAsClosed(TLSHandshakeError)
// Would need to coordinate with tlstimeout()
// to avoid double logging, so skip logging
// here, and don't report a slow consumer error.
return true
}
} else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) {
// Under some conditions, a connection may hit a slow consumer write deadline
// before the authorization timeout. If that is the case, then we handle
// as slow consumer though we do not increase the counter as that can be
// misleading.
c.markConnAsClosed(SlowConsumerWriteDeadline)
return true
}
// Slow consumer here..
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.",
c.out.wdl, numChunks, attempted)
// We always close CLIENT connections, or when nothing was written at all...
if c.kind == CLIENT || written == 0 {
c.markConnAsClosed(SlowConsumerWriteDeadline)
return true
}
return false
}
// Marks this connection as closed with the given reason.
// Sets the connMarkedClosed flag and skipFlushOnClose depending on the reason.
// Depending on the kind of connection, the connection will be saved.
// If a writeLoop has been started, the final flush will be done there, otherwise
// flush and close of TCP connection is done here in place.
// Lock is held on entry.
func (c *client) markConnAsClosed(reason ClosedState) {
// Possibly set skipFlushOnClose flag even if connection has already been
// mark as closed. The rationale is that a connection may be closed with
// a reason that justifies a flush (say after sending an -ERR), but then
// the flushOutbound() gets a write error. If that happens, connection
// being lost, there is no reason to attempt to flush again during the
// teardown when the writeLoop exits.
var skipFlush bool
switch reason {
case ReadError, WriteError, SlowConsumerPendingBytes, SlowConsumerWriteDeadline, TLSHandshakeError:
c.flags.set(skipFlushOnClose)
skipFlush = true
}
if c.flags.isSet(connMarkedClosed) {
return
}
c.flags.set(connMarkedClosed)
// For a websocket client, unless we are told not to flush, enqueue
// a websocket CloseMessage based on the reason.
if !skipFlush && c.isWebsocket() && !c.ws.closeSent {
c.wsEnqueueCloseMessage(reason)
}
// Be consistent with the creation: for routes and gateways,
// we use Noticef on create, so use that too for delete.
if c.srv != nil {
if c.kind == ROUTER || c.kind == GATEWAY {
c.Noticef("%s connection closed: %s", c.typeString(), reason)
} else { // Client, System, Jetstream, Account and Leafnode connections.
c.Debugf("%s connection closed: %s", c.typeString(), reason)
}
}
// Save off the connection if its a client or leafnode.
if c.kind == CLIENT || c.kind == LEAF {
if nc := c.nc; nc != nil && c.srv != nil {
// TODO: May want to send events to single go routine instead
// of creating a new go routine for each save.
go c.srv.saveClosedClient(c, nc, reason)
}
}
// If writeLoop exists, let it do the final flush, close and teardown.
if c.flags.isSet(writeLoopStarted) {
// Since we want the writeLoop to do the final flush and tcp close,
// we want the reconnect to be done there too. However, it shouldn't
// happen before the connection has been removed from the server
// state (end of closeConnection()). This ref count allows us to
// guarantee that.
c.rref++
c.flushSignal()
return
}
// Flush (if skipFlushOnClose is not set) and close in place. If flushing,
// use a small WriteDeadline.
c.flushAndClose(true)
}
// flushSignal will use server to queue the flush IO operation to a pool of flushers.
// Lock must be held.
func (c *client) flushSignal() {
c.out.sg.Signal()
}
// Traces a message.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceMsg(msg []byte) {
maxTrace := c.srv.getOpts().MaxTracedMsgLen
if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace])
} else {
c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
}
}
// Traces an incoming operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceInOp(op string, arg []byte) {
c.traceOp("<<- %s", op, arg)
}
// Traces an outgoing operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceOutOp(op string, arg []byte) {
c.traceOp("->> %s", op, arg)
}
func (c *client) traceOp(format, op string, arg []byte) {
opa := []interface{}{}
if op != "" {
opa = append(opa, op)
}
if arg != nil {
opa = append(opa, string(arg))
}
c.Tracef(format, opa)
}
// Process the information messages from Clients and other Routes.
func (c *client) processInfo(arg []byte) error {
info := Info{}
if err := json.Unmarshal(arg, &info); err != nil {
return err
}
switch c.kind {
case ROUTER:
c.processRouteInfo(&info)
case GATEWAY:
c.processGatewayInfo(&info)
case LEAF:
return c.processLeafnodeInfo(&info)
}
return nil
}
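// processErr handles an -ERR protocol message received from the remote,
// logging it according to the connection kind. The connection is closed with
// ParseError except for leafnodes, which handle the error themselves.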
func (c *client) processErr(errStr string) {
close := true
switch c.kind {
case CLIENT:
c.Errorf("Client Error %s", errStr)
case ROUTER:
c.Errorf("Route Error %s", errStr)
case GATEWAY:
c.Errorf("Gateway Error %s", errStr)
case LEAF:
c.Errorf("Leafnode Error %s", errStr)
c.leafProcessErr(errStr)
close = false
case JETSTREAM:
c.Errorf("JetStream Error %s", errStr)
}
if close {
c.closeConnection(ParseError)
}
}
// Password pattern matcher.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)
// removePassFromTrace removes any notion of passwords from trace
// messages for logging.
func removePassFromTrace(arg []byte) []byte {
if !bytes.Contains(arg, []byte(`pass`)) {
return arg
}
// Take a copy of the connect proto just for the trace message.
var _arg [4096]byte
buf := append(_arg[:0], arg...)
m := passPat.FindAllSubmatchIndex(buf, -1)
if len(m) == 0 {
return arg
}
redactedPass := []byte("[REDACTED]")
for _, i := range m {
if len(i) < 4 {
continue
}
start := i[2]
end := i[3]
// Replace password substring.
buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
break
}
return buf
}
// Returns the RTT by computing the elapsed time between now and `start`.
// On Windows VM where I (IK) run tests, time.Since() will return 0
// (I suspect some time granularity issues). So return at minimum 1ns.
func computeRTT(start time.Time) time.Duration {
rtt := time.Since(start)
if rtt <= 0 {
rtt = time.Nanosecond
}
return rtt
}
// processConnect will process a client connect op.
func (c *client) processConnect(arg []byte) error {
supportsHeaders := c.srv.supportsHeaders()
c.mu.Lock()
// If we can't stop the timer because the callback is in progress...
if !c.clearAuthTimer() {
// wait for it to finish and handle sending the failure back to
// the client.
for !c.isClosed() {
c.mu.Unlock()
time.Sleep(25 * time.Millisecond)
c.mu.Lock()
}
c.mu.Unlock()
return nil
}
c.last = time.Now()
// Estimate RTT to start.
if c.kind == CLIENT {
c.rtt = computeRTT(c.start)
if c.srv != nil {
c.clearPingTimer()
c.srv.setFirstPingTimer(c)
}
}
kind := c.kind
srv := c.srv
// Moved unmarshalling of clients' Options under the lock.
// The client has already been added to the server map, so it is possible
// that other routines lookup the client, and access its options under
// the client's lock, so unmarshalling the options outside of the lock
// would cause data RACEs.
if err := json.Unmarshal(arg, &c.opts); err != nil {
c.mu.Unlock()
return err
}
// Indicate that the CONNECT protocol has been received, and that the
// server now knows which protocol this client supports.
c.flags.set(connectReceived)
// Capture these under lock
c.echo = c.opts.Echo
proto := c.opts.Protocol
verbose := c.opts.Verbose
lang := c.opts.Lang
account := c.opts.Account
accountNew := c.opts.AccountNew
if c.kind == CLIENT {
var ncs string
if c.opts.Version != "" {
ncs = fmt.Sprintf("v%s", c.opts.Version)
}
if c.opts.Lang != "" {
if c.opts.Version == _EMPTY_ {
ncs = c.opts.Lang
} else {
ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Lang)
}
}
if c.opts.Name != "" {
if c.opts.Version == _EMPTY_ && c.opts.Lang == _EMPTY_ {
ncs = c.opts.Name
} else {
ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Name)
}
}
if ncs != _EMPTY_ {
c.ncs.Store(fmt.Sprintf("%s - %q", c.String(), ncs))
}
}
// If websocket client and JWT not in the CONNECT, use the cookie JWT (possibly empty).
if ws := c.ws; ws != nil && c.opts.JWT == "" {
c.opts.JWT = ws.cookieJwt
}
// when not in operator mode, discard the jwt
if srv != nil && srv.trustedKeys == nil {
c.opts.JWT = ""
}
ujwt := c.opts.JWT
// For headers both client and server need to support.
c.headers = supportsHeaders && c.opts.Headers
c.mu.Unlock()
if srv != nil {
// Applicable to clients only:
// As soon as c.opts is unmarshalled and if the proto is at
// least ClientProtoInfo, we need to increment the following counter.
// This is decremented when client is removed from the server's
// clients map.
if kind == CLIENT && proto >= ClientProtoInfo {
srv.mu.Lock()
srv.cproto++
srv.mu.Unlock()
}
// Check for Auth
if ok := srv.checkAuthentication(c); !ok {
// We may fail here because we reached max limits on an account.
if ujwt != "" {
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
srv.mu.Lock()
tooManyAccCons := acc != nil && acc != srv.gacc
srv.mu.Unlock()
if tooManyAccCons {
return ErrTooManyAccountConnections
}
}
c.authViolation()
return ErrAuthentication
}
// Check for Account designation, this section should be only used when there is not a jwt.
if account != "" {
var acc *Account
var wasNew bool
var err error
if !srv.NewAccountsAllowed() {
acc, err = srv.LookupAccount(account)
if err != nil {
c.Errorf(err.Error())
c.sendErr(ErrMissingAccount.Error())
return err
} else if accountNew && acc != nil {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
} else {
// We can create this one on the fly.
acc, wasNew = srv.LookupOrRegisterAccount(account)
if accountNew && !wasNew {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
}
// If we are here we can register ourselves with the new account.
if err := c.registerWithAccount(acc); err != nil {
c.reportErrRegisterAccount(acc, err)
return ErrBadAccount
}
} else if c.acc == nil {
// By default register with the global account.
c.registerWithAccount(srv.globalAccount())
}
}
switch kind {
case CLIENT:
// Check client protocol request if it exists.
if proto < ClientProtoZero || proto > ClientProtoInfo {
c.sendErr(ErrBadClientProtocol.Error())
c.closeConnection(BadClientProtocolVersion)
return ErrBadClientProtocol
}
// Check to see that if no_responders is requested
// they have header support on as well.
c.mu.Lock()
misMatch := c.opts.NoResponders && !c.headers
c.mu.Unlock()
if misMatch {
c.sendErr(ErrNoRespondersRequiresHeaders.Error())
c.closeConnection(NoRespondersRequiresHeaders)
return ErrNoRespondersRequiresHeaders
}
if verbose {
c.sendOK()
}
case ROUTER:
// Delegate the rest of processing to the route
return c.processRouteConnect(srv, arg, lang)
case GATEWAY:
// Delegate the rest of processing to the gateway
return c.processGatewayConnect(arg)
case LEAF:
// Delegate the rest of processing to the leaf node
return c.processLeafNodeConnect(srv, arg, lang)
}
return nil
}
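// sendErrAndErr sends the -ERR protocol to the remote and logs the same
// message at error level.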
func (c *client) sendErrAndErr(err string) {
c.sendErr(err)
c.Errorf(err)
}
func (c *client) sendErrAndDebug(err string) {
c.sendErr(err)
c.Debugf(err)
}
func (c *client) authTimeout() {
c.sendErrAndDebug("Authentication Timeout")
c.closeConnection(AuthenticationTimeout)
}
func (c *client) authExpired() {
c.sendErrAndDebug("User Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) accountAuthExpired() {
c.sendErrAndDebug("Account Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) authViolation() {
var s *Server
var hasTrustedNkeys, hasNkeys, hasUsers bool
if s = c.srv; s != nil {
s.mu.Lock()
hasTrustedNkeys = len(s.trustedKeys) > 0
hasNkeys = s.nkeys != nil
hasUsers = s.users != nil
s.mu.Unlock()
defer s.sendAuthErrorEvent(c)
}
if hasTrustedNkeys {
c.Errorf("%v", ErrAuthentication)
} else if hasNkeys {
c.Errorf("%s - Nkey %q",
ErrAuthentication.Error(),
c.opts.Nkey)
} else if hasUsers {
c.Errorf("%s - User %q",
ErrAuthentication.Error(),
c.opts.Username)
} else {
c.Errorf(ErrAuthentication.Error())
}
c.sendErr("Authorization Violation")
c.closeConnection(AuthenticationViolation)
}
func (c *client) maxAccountConnExceeded() {
c.sendErrAndErr(ErrTooManyAccountConnections.Error())
c.closeConnection(MaxAccountConnectionsExceeded)
}
func (c *client) maxConnExceeded() {
c.sendErrAndErr(ErrTooManyConnections.Error())
c.closeConnection(MaxConnectionsExceeded)
}
func (c *client) maxSubsExceeded() {
c.sendErrAndErr(ErrTooManySubs.Error())
}
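// maxPayloadViolation reports a message payload that exceeds the allowed
// maximum and closes the connection.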
func (c *client) maxPayloadViolation(sz int, max int32) {
c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
c.sendErr("Maximum Payload Violation")
c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for a client connection.
// Returns whether the data is referenced or not. If referenced, the caller
// should not reuse the `data` array.
// Lock should be held.
func (c *client) queueOutbound(data []byte) bool {
// Do not keep going if closed
if c.isClosed() {
return false
}
// Assume data will not be referenced
referenced := false
// Add to pending bytes total.
c.out.pb += int64(len(data))
// Check for slow consumer via pending bytes limit.
// ok to return here, client is going away.
if c.kind == CLIENT && c.out.pb > c.out.mp {
// Perf wise, it looks like it is faster to optimistically add than
// checking current pb+len(data) and then add to pb.
c.out.pb -= int64(len(data))
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
c.markConnAsClosed(SlowConsumerPendingBytes)
return referenced
}
if c.out.p == nil && len(data) < maxBufSize {
if c.out.sz == 0 {
c.out.sz = startBufSize
}
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) {
c.out.p = c.out.s
c.out.s = nil
} else {
// FIXME(dlc) - make power of 2 if less than maxBufSize?
c.out.p = make([]byte, 0, c.out.sz)
}
}
// Determine if we copy or reference
available := cap(c.out.p) - len(c.out.p)
if len(data) > available {
// We can't fit everything into existing primary, but message will
// fit in next one we allocate or utilize from the secondary.
// So copy what we can.
if available > 0 && len(data) < int(c.out.sz) {
c.out.p = append(c.out.p, data[:available]...)
data = data[available:]
}
// Put the primary on the nb if it has a payload
if len(c.out.p) > 0 {
c.out.nb = append(c.out.nb, c.out.p)
c.out.p = nil
}
// Check for a big message, and if found place directly on nb
// FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize
if len(data) > maxBufSize {
c.out.nb = append(c.out.nb, data)
referenced = true
} else {
// We will copy to primary.
if c.out.p == nil {
// Grow here
if (c.out.sz << 1) <= maxBufSize {
c.out.sz <<= 1
}
if len(data) > int(c.out.sz) {
c.out.p = make([]byte, 0, len(data))
} else {
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch?
c.out.p = c.out.s
c.out.s = nil
} else {
c.out.p = make([]byte, 0, c.out.sz)
}
}
}
c.out.p = append(c.out.p, data...)
}
} else {
c.out.p = append(c.out.p, data...)
}
// Check here if we should create a stall channel if we are falling behind.
// We do this here since if we wait for consumer's writeLoop it could be
// too late with large number of fan in producers.
if c.out.pb > c.out.mp/2 && c.out.stc == nil {
c.out.stc = make(chan struct{})
}
return referenced
}
// Assume the lock is held upon entry.
func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) {
if c.isClosed() {
return
}
c.queueOutbound(proto)
if !(doFlush && c.flushOutbound()) {
c.flushSignal()
}
}
// Queues and then flushes the connection. This should only be called when
// the writeLoop cannot be started yet. Use enqueueProto() otherwise.
// Lock is held on entry.
func (c *client) sendProtoNow(proto []byte) {
c.enqueueProtoAndFlush(proto, true)
}
// Enqueues the given protocol and signal the writeLoop if necessary.
// Lock is held on entry.
func (c *client) enqueueProto(proto []byte) {
c.enqueueProtoAndFlush(proto, false)
}
// Assume the lock is held upon entry.
func (c *client) sendPong() {
if c.trace {
c.traceOutOp("PONG", nil)
}
c.enqueueProto([]byte(pongProto))
}
// Used to kick off a RTT measurement for latency tracking.
func (c *client) sendRTTPing() bool {
c.mu.Lock()
sent := c.sendRTTPingLocked()
c.mu.Unlock()
return sent
}
// Used to kick off a RTT measurement for latency tracking.
// This is normally called only when the caller has checked that
// the c.rtt is 0 and wants to force an update by sending a PING.
// Client lock held on entry.
func (c *client) sendRTTPingLocked() bool {
if c.isMqtt() {
return false
}
// Most client libs send a CONNECT+PING and wait for a PONG from the
// server. So if firstPongSent flag is set, it is ok for server to
// send the PING. But in case we have client libs that don't do that,
// allow the send of the PING if more than 2 secs have elapsed since
// the client TCP connection was accepted.
if !c.isClosed() &&
(c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) {
c.sendPing()
return true
}
return false
}
// Assume the lock is held upon entry.
func (c *client) sendPing() {
c.rttStart = time.Now()
c.ping.out++
if c.trace {
c.traceOutOp("PING", nil)
}
c.enqueueProto([]byte(pingProto))
}
// Generates the INFO to be sent to the client with the client ID included.
// info arg will be copied since passed by value.
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
info.CID = c.cid
info.ClientIP = c.host
info.MaxPayload = c.mpay
if c.isWebsocket() {
info.ClientConnectURLs = info.WSConnectURLs
}
info.WSConnectURLs = nil
// Generate the info json
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
return bytes.Join(pcs, []byte(" "))
}
func (c *client) sendErr(err string) {
c.mu.Lock()
if c.trace {
c.traceOutOp("-ERR", []byte(err))
}
if !c.isMqtt() {
c.enqueueProto([]byte(fmt.Sprintf(errProto, err)))
}
c.mu.Unlock()
}
func (c *client) sendOK() {
c.mu.Lock()
if c.trace {
c.traceOutOp("OK", nil)
}
c.enqueueProto([]byte(okProto))
c.mu.Unlock()
}
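// processPing replies to a peer PING with a PONG. For clients that support
// the async INFO protocol, the first PING may also trigger an updated INFO
// if the cluster topology or max payload changed since the connection started.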
func (c *client) processPing() {
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.sendPong()
	// Record this so that we can suppress sending our own PING if this
	// one arrived within the given activity interval.
c.ping.last = time.Now()
// If not a CLIENT, we are done. Also the CONNECT should
// have been received, but make sure it is so before proceeding
if c.kind != CLIENT || !c.flags.isSet(connectReceived) {
c.mu.Unlock()
return
}
// If we are here, the CONNECT has been received so we know
// if this client supports async INFO or not.
var (
checkInfoChange bool
srv = c.srv
)
// For older clients, just flip the firstPongSent flag if not already
// set and we are done.
if c.opts.Protocol < ClientProtoInfo || srv == nil {
c.flags.setIfNotSet(firstPongSent)
} else {
// This is a client that supports async INFO protocols.
// If this is the first PING (so firstPongSent is not set yet),
// we will need to check if there was a change in cluster topology
// or we have a different max payload. We will send this first before
// pong since most clients do flush after connect call.
checkInfoChange = !c.flags.isSet(firstPongSent)
}
c.mu.Unlock()
if checkInfoChange {
opts := srv.getOpts()
srv.mu.Lock()
c.mu.Lock()
// Now that we are under both locks, we can flip the flag.
		// This prevents sendAsyncInfoToClients() and the code here
		// from sending a double INFO protocol.
c.flags.set(firstPongSent)
// If there was a cluster update since this client was created,
// send an updated INFO protocol now.
if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) {
c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo()))
}
c.mu.Unlock()
srv.mu.Unlock()
}
}
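// processPong clears the outstanding ping count, records the measured RTT,
// and for outbound gateway connections triggers a reordering of the
// outbound gateway connections.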
func (c *client) processPong() {
c.mu.Lock()
c.ping.out = 0
c.rtt = computeRTT(c.rttStart)
srv := c.srv
reorderGWs := c.kind == GATEWAY && c.gw.outbound
c.mu.Unlock()
if reorderGWs {
srv.gateway.orderOutboundConnections()
}
}
// Will return the parts from the raw wire msg.
func (c *client) msgParts(data []byte) (hdr []byte, msg []byte) {
if c != nil && c.pa.hdr > 0 {
return data[:c.pa.hdr], data[c.pa.hdr:]
}
return nil, data
}
// Header pubs take form HPUB <subject> [reply] <hdr_len> <total_len>\r\n
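// For example, "HPUB foo bar 12 29\r\n" publishes to subject "foo" with reply
// "bar", a 12 byte header block, and 29 total bytes (so a 17 byte payload).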
func (c *client) processHeaderPub(arg []byte) error {
if !c.headers {
return ErrMsgHeadersNotSupported
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_HPUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 3:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.hdr = parseSize(args[1])
c.pa.size = parseSize(args[2])
c.pa.hdb = args[1]
c.pa.szb = args[2]
case 4:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.hdr = parseSize(args[2])
c.pa.size = parseSize(args[3])
c.pa.hdb = args[2]
c.pa.szb = args[3]
default:
return fmt.Errorf("processHeaderPub Parse Error: '%s'", arg)
}
if c.pa.hdr < 0 {
return fmt.Errorf("processHeaderPub Bad or Missing Header Size: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processHeaderPub Bad or Missing Total Size: '%s'", arg)
}
if c.pa.hdr > c.pa.size {
return fmt.Errorf("processHeaderPub Header Size larger then TotalSize: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
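// processPub parses the publish arguments, which take the form
// PUB <subject> [reply] <size>\r\n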
func (c *client) processPub(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_PUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 2:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.size = parseSize(args[1])
c.pa.szb = args[1]
case 3:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.size = parseSize(args[2])
c.pa.szb = args[2]
default:
return fmt.Errorf("processPub Parse Error: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
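// splitArg splits a protocol argument on spaces, tabs, CRs and LFs, using a
// fixed-size scratch array to avoid allocations in the common case.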
func splitArg(arg []byte) [][]byte {
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
return args
}
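// parseSub parses a SUB protocol line of the form
// SUB <subject> [queue group] <sid>\r\n and registers the subscription.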
func (c *client) parseSub(argo []byte, noForward bool) error {
// Copy so we do not reference a potentially large buffer
// FIXME(dlc) - make more efficient.
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
var (
subject []byte
queue []byte
sid []byte
)
switch len(args) {
case 2:
subject = args[0]
queue = nil
sid = args[1]
case 3:
subject = args[0]
queue = args[1]
sid = args[2]
default:
return fmt.Errorf("processSub Parse Error: '%s'", arg)
}
	// If there was an error, it has been sent to the client. We don't return an
	// error here so as not to treat it as a parse error and close the connection.
c.processSub(subject, queue, sid, nil, noForward)
return nil
}
func (c *client) processSub(subject, queue, bsid []byte, cb msgHandler, noForward bool) (*subscription, error) {
// Create the subscription
sub := &subscription{client: c, subject: subject, queue: queue, sid: bsid, icb: cb}
c.mu.Lock()
// Indicate activity.
c.in.subs++
// Grab connection type, account and server info.
kind := c.kind
acc := c.acc
srv := c.srv
sid := string(sub.sid)
// This check does not apply to SYSTEM or JETSTREAM or ACCOUNT clients (because they don't have a `nc`...)
if c.isClosed() && (kind != SYSTEM && kind != JETSTREAM && kind != ACCOUNT) {
c.mu.Unlock()
return nil, ErrConnectionClosed
}
// Check permissions if applicable.
if kind == CLIENT {
		// First check whether the queue subscription is valid. This does not necessarily
		// mean that it will not be able to plain subscribe.
//
// allow = ["foo"] -> can subscribe or queue subscribe to foo using any queue
// allow = ["foo v1"] -> can only queue subscribe to 'foo v1', no plain subs allowed.
// allow = ["foo", "foo v1"] -> can subscribe to 'foo' but can only queue subscribe to 'foo v1'
//
if sub.queue != nil {
if !c.canQueueSubscribe(string(sub.subject), string(sub.queue)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, ErrSubscribePermissionViolation
}
} else if !c.canSubscribe(string(sub.subject)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, ErrSubscribePermissionViolation
}
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil, ErrTooManySubs
}
var updateGWs bool
var err error
// Subscribe here.
es := c.subs[sid]
if es == nil {
c.subs[sid] = sub
if acc != nil && acc.sl != nil {
err = acc.sl.Insert(sub)
if err != nil {
delete(c.subs, sid)
} else {
updateGWs = c.srv.gateway.enabled
}
}
}
// Unlocked from here onward
c.mu.Unlock()
if err != nil {
c.sendErr("Invalid Subject")
return nil, ErrMalformedSubject
} else if c.opts.Verbose && kind != SYSTEM {
c.sendOK()
}
// If it was already registered, return it.
if es != nil {
return es, nil
}
// No account just return.
if acc == nil {
return sub, nil
}
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
if noForward {
return sub, nil
}
// If we are routing and this is a local sub, add to the route map for the associated account.
if kind == CLIENT || kind == SYSTEM || kind == JETSTREAM || kind == ACCOUNT {
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
return sub, nil
}
// Used to pass stream import matches to addShadowSub
type ime struct {
im *streamImport
dyn bool
}
// If the client's account has stream imports and there are matches for
// this subscription's subject, then add shadow subscriptions in the
// other accounts that export this subject.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
if acc == nil {
return ErrMissingAccount
}
var (
_ims [16]ime
ims = _ims[:0]
tokens []string
tsa [32]string
hasWC bool
)
acc.mu.RLock()
// Loop over the import subjects. We have 3 scenarios. If we have an
// exact match or a superset match we should use the from field from
// the import. If we are a subset, we have to dynamically calculate
// the subject.
for _, im := range acc.imports.streams {
if im.invalid {
continue
}
subj := string(sub.subject)
if subj == im.to {
ims = append(ims, ime{im, false})
continue
}
if tokens == nil {
tokens = tsa[:0]
start := 0
for i := 0; i < len(subj); i++ {
// This is not perfect, but the test below will
// be more exact, this is just to trigger the
// additional test.
if subj[i] == pwc || subj[i] == fwc {
hasWC = true
} else if subj[i] == btsep {
tokens = append(tokens, subj[start:i])
start = i + 1
}
}
tokens = append(tokens, subj[start:])
}
if isSubsetMatch(tokens, im.to) {
ims = append(ims, ime{im, true})
} else if hasWC && subjectIsSubsetMatch(im.to, subj) {
ims = append(ims, ime{im, false})
}
}
acc.mu.RUnlock()
var shadow []*subscription
if len(ims) > 0 {
shadow = make([]*subscription, 0, len(ims))
}
// Now walk through collected stream imports that matched.
for i := 0; i < len(ims); i++ {
ime := &ims[i]
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, ime)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
if shadow != nil {
c.mu.Lock()
sub.shadow = shadow
c.mu.Unlock()
}
return nil
}
// Add in the shadow subscription.
func (c *client) addShadowSub(sub *subscription, ime *ime) (*subscription, error) {
im := ime.im
nsub := *sub // copy
nsub.im = im
// Check if we need to change shadow subscription's subject.
if !im.usePub {
if ime.dyn {
if im.rtr == nil {
im.rtr = im.tr.reverse()
}
subj, err := im.rtr.transformSubject(string(nsub.subject))
if err != nil {
return nil, err
}
nsub.subject = []byte(subj)
} else {
nsub.subject = []byte(im.from)
}
}
c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
if err := im.acc.sl.Insert(&nsub); err != nil {
errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
c.Debugf(errs)
return nil, fmt.Errorf(errs)
}
// Update our route map here.
c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1)
if c.srv.gateway.enabled {
c.srv.gatewayUpdateSubInterest(im.acc.Name, &nsub, 1)
}
c.srv.updateLeafNodes(im.acc, &nsub, 1)
return &nsub, nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
func (c *client) canSubscribe(subject string) bool {
if c.perms == nil {
return true
}
allowed := true
// Check allow list. If no allow list that means all are allowed. Deny can overrule.
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
allowed = len(r.psubs) != 0
}
// If we have a deny list and we think we are allowed, check that as well.
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
allowed = len(r.psubs) == 0
// We use the actual subscription to signal us to spin up the deny mperms
// and cache. We check if the subject is a wildcard that contains any of
// the deny clauses.
// FIXME(dlc) - We could be smarter and track when these go away and remove.
if allowed && c.mperms == nil && subjectHasWildcard(subject) {
// Whip through the deny array and check if this wildcard subject is within scope.
for _, sub := range c.darray {
tokens := strings.Split(sub, tsep)
				if isSubsetMatch(tokens, subject) {
c.loadMsgDenyFilter()
break
}
}
}
}
return allowed
}
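// queueMatches reports whether the given queue name matches any queue
// subscription group present in the permission match results.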
func queueMatches(queue string, qsubs [][]*subscription) bool {
if len(qsubs) == 0 {
return true
}
for _, qsub := range qsubs {
qs := qsub[0]
qname := string(qs.queue)
// NOTE: '*' and '>' tokens can also be valid
// queue names so we first check against the
// literal name. e.g. v1.* == v1.*
if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) {
return true
}
}
return false
}
func (c *client) canQueueSubscribe(subject, queue string) bool {
if c.perms == nil {
return true
}
allowed := true
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) > 0
if len(r.qsubs) > 0 {
// If the queue appears in the allow list, then DO allow.
allowed = queueMatches(queue, r.qsubs)
}
}
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) == 0
if len(r.qsubs) > 0 {
// If the queue appears in the deny list, then DO NOT allow.
allowed = !queueMatches(queue, r.qsubs)
}
}
return allowed
}
// Low level unsubscribe for a given client.
func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) {
c.mu.Lock()
if !force && sub.max > 0 && sub.nm < sub.max {
c.Debugf(
"Deferring actual UNSUB(%s): %d max, %d received",
string(sub.subject), sub.max, sub.nm)
c.mu.Unlock()
return
}
if c.trace {
c.traceOp("<-> %s", "DELSUB", sub.sid)
}
if c.kind != CLIENT && c.kind != SYSTEM {
c.removeReplySubTimeout(sub)
}
// Remove accounting if requested. This will be false when we close a connection
// with open subscriptions.
if remove {
delete(c.subs, string(sub.sid))
if acc != nil {
acc.sl.Remove(sub)
}
}
// Check to see if we have shadow subscriptions.
var updateRoute bool
var updateGWs bool
shadowSubs := sub.shadow
sub.shadow = nil
if len(shadowSubs) > 0 {
updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
if updateRoute {
updateGWs = c.srv.gateway.enabled
}
}
sub.close()
c.mu.Unlock()
// Process shadow subs if we have them.
for _, nsub := range shadowSubs {
if err := nsub.im.acc.sl.Remove(nsub); err != nil {
c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
} else {
if updateRoute {
c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
}
if updateGWs {
c.srv.gatewayUpdateSubInterest(nsub.im.acc.Name, nsub, -1)
}
}
// Now check on leafnode updates.
c.srv.updateLeafNodes(nsub.im.acc, nsub, -1)
}
// Now check to see if this was part of a respMap entry for service imports.
if acc != nil {
acc.checkForReverseEntry(string(sub.subject), nil, true)
}
}
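// processUnsub processes an UNSUB protocol line of the form
// UNSUB <sid> [max_msgs]\r\n, removing the subscription immediately or
// automatically once it has received max_msgs messages.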
func (c *client) processUnsub(arg []byte) error {
args := splitArg(arg)
var sid []byte
max := -1
switch len(args) {
case 1:
sid = args[0]
case 2:
sid = args[0]
max = parseSize(args[1])
default:
return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
}
var sub *subscription
var ok, unsub bool
c.mu.Lock()
// Indicate activity.
c.in.subs++
// Grab connection type.
kind := c.kind
srv := c.srv
var acc *Account
updateGWs := false
if sub, ok = c.subs[string(sid)]; ok {
acc = c.acc
if max > 0 {
sub.max = int64(max)
} else {
// Clear it here to override
sub.max = 0
unsub = true
}
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if c.opts.Verbose {
c.sendOK()
}
if unsub {
c.unsubscribe(acc, sub, false, true)
if acc != nil && kind == CLIENT || kind == SYSTEM || kind == ACCOUNT {
srv.updateRouteSubscriptionMap(acc, sub, -1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
return nil
}
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
if denied, ok := c.mperms.dcache[subject]; ok {
return denied
} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
c.mperms.dcache[subject] = true
return true
} else {
c.mperms.dcache[subject] = false
}
if len(c.mperms.dcache) > maxDenyPermCacheSize {
c.pruneDenyCache()
}
return false
}
// Create a message header for routes or leafnodes. Header and origin cluster aware.
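// Routes receive RMSG (or HMSG when the receiver supports headers), leafnodes
// receive LMSG, and leaf-origin messages sent to an origin-cluster aware route
// are sent as LMSG with the origin cluster name included.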
func (c *client) msgHeaderForRouteOrLeaf(subj, reply []byte, rt *routeTarget, acc *Account) []byte {
hasHeader := c.pa.hdr > 0
canReceiveHeader := rt.sub.client.headers
mh := c.msgb[:msgHeadProtoLen]
kind := rt.sub.client.kind
var lnoc bool
if kind == ROUTER {
// If we are coming from a leaf with an origin cluster we need to handle differently
// if we can. We will send a route based LMSG which has origin cluster and headers
// by default.
if c.kind == LEAF && c.remoteCluster() != _EMPTY_ && rt.sub.client.route.lnoc {
mh[0] = 'L'
mh = append(mh, c.remoteCluster()...)
mh = append(mh, ' ')
lnoc = true
} else {
// Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite.
mh[0] = 'R'
}
mh = append(mh, acc.Name...)
mh = append(mh, ' ')
} else {
// Leaf nodes are LMSG
mh[0] = 'L'
		// Remap subject if it's a shadow subscription, treat like a normal client.
if rt.sub.im != nil {
if rt.sub.im.tr != nil {
to, _ := rt.sub.im.tr.transformSubject(string(subj))
subj = []byte(to)
} else {
subj = []byte(rt.sub.im.to)
}
}
}
mh = append(mh, subj...)
mh = append(mh, ' ')
if len(rt.qs) > 0 {
if reply != nil {
mh = append(mh, "+ "...) // Signal that there is a reply.
mh = append(mh, reply...)
mh = append(mh, ' ')
} else {
mh = append(mh, "| "...) // Only queues
}
mh = append(mh, rt.qs...)
} else if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
if lnoc {
		// leafnode origin LMSGs always have a header entry even if zero.
if c.pa.hdr <= 0 {
mh = append(mh, '0')
} else {
mh = append(mh, c.pa.hdb...)
}
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else if hasHeader {
if canReceiveHeader {
mh[0] = 'H'
mh = append(mh, c.pa.hdb...)
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else {
// If we are here we need to truncate the payload size
nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
mh = append(mh, nsz...)
}
} else {
mh = append(mh, c.pa.szb...)
}
return append(mh, _CRLF_...)
}
// Create a message header for clients. Header aware.
func (c *client) msgHeader(subj, reply []byte, sub *subscription) []byte {
// See if we should do headers. We have to have a headers msg and
// the client we are going to deliver to needs to support headers as well.
hasHeader := c.pa.hdr > 0
canReceiveHeader := sub.client != nil && sub.client.headers
var mh []byte
if hasHeader && canReceiveHeader {
mh = c.msgb[:msgHeadProtoLen]
mh[0] = 'H'
} else {
mh = c.msgb[1:msgHeadProtoLen]
}
mh = append(mh, subj...)
mh = append(mh, ' ')
if len(sub.sid) > 0 {
mh = append(mh, sub.sid...)
mh = append(mh, ' ')
}
if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
if hasHeader {
if canReceiveHeader {
mh = append(mh, c.pa.hdb...)
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else {
// If we are here we need to truncate the payload size
nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
mh = append(mh, nsz...)
}
} else {
mh = append(mh, c.pa.szb...)
}
mh = append(mh, _CRLF_...)
return mh
}
func (c *client) stalledWait(producer *client) {
stall := c.out.stc
ttl := stallDuration(c.out.pb, c.out.mp)
c.mu.Unlock()
defer c.mu.Lock()
select {
case <-stall:
case <-time.After(ttl):
producer.Debugf("Timed out of fast producer stall (%v)", ttl)
}
}
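// stallDuration computes how long a producer should wait for a stalled
// consumer: the minimum when pending bytes are at half the limit or below,
// the maximum once the limit is reached, and in between it grows by one
// minimum-duration step for each additional 10% of the half-limit consumed.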
func stallDuration(pb, mp int64) time.Duration {
ttl := stallClientMinDuration
if pb >= mp {
ttl = stallClientMaxDuration
} else if hmp := mp / 2; pb > hmp {
bsz := hmp / 10
additional := int64(ttl) * ((pb - hmp) / bsz)
ttl += time.Duration(additional)
}
return ttl
}
// Used to treat maps as an efficient set
var needFlush = struct{}{}
// deliverMsg will deliver a message to a matching subscription and its underlying client.
// We process all connection/client types. mh is the part that will be protocol/client specific.
func (c *client) deliverMsg(sub *subscription, subject, reply, mh, msg []byte, gwrply bool) bool {
if sub.client == nil {
return false
}
client := sub.client
client.mu.Lock()
// Check echo
if c == client && !client.echo {
client.mu.Unlock()
return false
}
// Check if we have a subscribe deny clause. This will trigger us to check the subject
// for a match against the denied subjects.
if client.mperms != nil && client.checkDenySub(string(subject)) {
client.mu.Unlock()
return false
}
// New race detector forces this now.
if sub.isClosed() {
client.mu.Unlock()
return false
}
// Check if we are a leafnode and have perms to check.
if client.kind == LEAF && client.perms != nil {
if !client.pubAllowed(string(subject)) {
client.mu.Unlock()
return false
}
}
srv := client.srv
sub.nm++
// Check if we should auto-unsubscribe.
if sub.max > 0 {
if client.kind == ROUTER && sub.nm >= sub.max {
// The only router based messages that we will see here are remoteReplies.
// We handle these slightly differently.
defer client.removeReplySub(sub)
} else {
// For routing..
shouldForward := client.kind == CLIENT || client.kind == SYSTEM && client.srv != nil
// If we are at the exact number, unsubscribe but
// still process the message in hand, otherwise
// unsubscribe and drop message on the floor.
if sub.nm == sub.max {
client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid))
// Due to defer, reverse the code order so that execution
// is consistent with other cases where we unsubscribe.
if shouldForward {
if srv.gateway.enabled {
defer srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
defer srv.updateRouteSubscriptionMap(client.acc, sub, -1)
}
defer client.unsubscribe(client.acc, sub, true, true)
} else if sub.nm > sub.max {
client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max)
client.mu.Unlock()
client.unsubscribe(client.acc, sub, true, true)
if shouldForward {
srv.updateRouteSubscriptionMap(client.acc, sub, -1)
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
}
return false
}
}
}
	// Check here if we have a header with our message. If this client can not
	// support headers we need to strip them from the payload.
// The actual header would have been processed correctly for us, so just
// need to update payload.
if c.pa.hdr > 0 && !sub.client.headers {
msg = msg[c.pa.hdr:]
}
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
msgSize := int64(len(msg))
prodIsMQTT := c.isMqtt()
// MQTT producers send messages without CR_LF, so don't remove it for them.
if !prodIsMQTT {
msgSize -= int64(LEN_CR_LF)
}
// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
// Check for internal subscriptions.
if sub.icb != nil {
if gwrply {
// Note that we keep track of the GW routed reply in the destination
// connection (`client`). The routed reply subject is in `c.pa.reply`,
// should that change, we would have to pass the GW routed reply as
// a parameter of deliverMsg().
srv.trackGWReply(client, c.pa.reply)
}
client.mu.Unlock()
// Internal account clients are for service imports and need the '\r\n'.
if client.kind == ACCOUNT {
sub.icb(sub, c, string(subject), string(reply), msg)
} else {
sub.icb(sub, c, string(subject), string(reply), msg[:msgSize])
}
return true
}
// We don't count internal deliveries so we update server statistics here.
atomic.AddInt64(&srv.outMsgs, 1)
atomic.AddInt64(&srv.outBytes, msgSize)
// If we are a client and we detect that the consumer we are
// sending to is in a stalled state, go ahead and wait here
// with a limit.
if c.kind == CLIENT && client.out.stc != nil {
client.stalledWait(c)
}
// Check for closed connection
if client.isClosed() {
client.mu.Unlock()
return false
}
// Do a fast check here to see if we should be tracking this from a latency
// perspective. This will be for a request being received for an exported service.
// This needs to be from a non-client (otherwise tracking happens at requestor).
//
// Also this check captures if the original reply (c.pa.reply) is a GW routed
// reply (since it is known to be > minReplyLen). If that is the case, we need to
// track the binding between the routed reply and the reply set in the message
// header (which is c.pa.reply without the GNR routing prefix).
if client.kind == CLIENT && len(c.pa.reply) > minReplyLen {
if gwrply {
// Note that we keep track of the GW routed reply in the destination
// connection (`client`). The routed reply subject is in `c.pa.reply`,
// should that change, we would have to pass the GW routed reply as
// a parameter of deliverMsg().
srv.trackGWReply(client, c.pa.reply)
}
		// If we do not have a registered RTT, queue that up now.
if client.rtt == 0 {
client.sendRTTPingLocked()
}
// FIXME(dlc) - We may need to optimize this.
// We will have tagged this with a suffix ('.T') if we are tracking. This is
		// needed for sampling. Not all will be tracked.
if c.kind != CLIENT && isTrackedReply(c.pa.reply) {
client.trackRemoteReply(string(subject), string(c.pa.reply))
}
}
// Queue to outbound buffer
client.queueOutbound(mh)
client.queueOutbound(msg)
if prodIsMQTT {
// Need to add CR_LF since MQTT producers don't send CR_LF
client.queueOutbound([]byte(CR_LF))
}
client.out.pm++
// If we are tracking dynamic publish permissions that track reply subjects,
// do that accounting here. We only look at client.replies which will be non-nil.
if client.replies != nil && len(reply) > 0 {
client.replies[string(reply)] = &resp{time.Now(), 0}
if len(client.replies) > replyPermLimit {
client.pruneReplyPerms()
}
}
// Check outbound threshold and queue IO flush if needed.
// This is specifically looking at situations where we are getting behind and may want
// to intervene before this producer goes back to top of readloop. We are in the producer's
// readloop go routine at this point.
// FIXME(dlc) - We may call this alot, maybe suppress after first call?
if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
client.flushSignal()
}
// Add the data size we are responsible for here. This will be processed when we
// return to the top of the readLoop.
c.addToPCD(client)
if client.trace {
client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
}
client.mu.Unlock()
return true
}
// Add the given sub's client to the list of clients that need flushing.
// This must be invoked from `c`'s readLoop. No lock for c is required,
// however, `client` lock must be held on entry. This holds true even
// if `client` is the same as `c`.
func (c *client) addToPCD(client *client) {
if _, ok := c.pcd[client]; !ok {
client.out.fsp++
c.pcd[client] = needFlush
}
}
// This will track a remote reply for an exported service that has requested
// latency tracking.
// Lock assumed to be held.
func (c *client) trackRemoteReply(subject, reply string) {
a := c.acc
if a == nil {
return
}
var lrt time.Duration
var respThresh time.Duration
a.mu.RLock()
se := a.getServiceExport(subject)
if se != nil {
lrt = a.lowestServiceExportResponseTime()
respThresh = se.respThresh
}
a.mu.RUnlock()
if se == nil {
return
}
if c.rrTracking == nil {
c.rrTracking = &rrTracking{
rmap: make(map[string]*remoteLatency),
ptmr: time.AfterFunc(lrt, c.pruneRemoteTracking),
lrt: lrt,
}
}
rl := remoteLatency{
Account: a.Name,
ReqId: reply,
respThresh: respThresh,
}
rl.M2.RequestStart = time.Now().UTC()
c.rrTracking.rmap[reply] = &rl
}
// pruneRemoteTracking will prune any remote tracking objects
// that are too old. These are orphaned when a service is not
// sending responses etc.
// Lock should be held upon entry.
func (c *client) pruneRemoteTracking() {
c.mu.Lock()
if c.rrTracking == nil {
c.mu.Unlock()
return
}
now := time.Now()
for subject, rl := range c.rrTracking.rmap {
if now.After(rl.M2.RequestStart.Add(rl.respThresh)) {
delete(c.rrTracking.rmap, subject)
}
}
if len(c.rrTracking.rmap) > 0 {
t := c.rrTracking.ptmr
t.Stop()
t.Reset(c.rrTracking.lrt)
} else {
c.rrTracking.ptmr.Stop()
c.rrTracking = nil
}
c.mu.Unlock()
}
// pruneReplyPerms will remove any stale or expired entries
// in our reply cache. We make sure to not check too often.
func (c *client) pruneReplyPerms() {
	// Nothing to do if we are not tracking response permissions.
if c.perms.resp == nil {
return
}
mm := c.perms.resp.MaxMsgs
ttl := c.perms.resp.Expires
now := time.Now()
for k, resp := range c.replies {
if mm > 0 && resp.n >= mm {
delete(c.replies, k)
} else if ttl > 0 && now.Sub(resp.t) > ttl {
delete(c.replies, k)
}
}
}
// pruneDenyCache will prune the deny cache via randomly
// deleting items. Doing so pruneSize items at a time.
// Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
r := 0
for subject := range c.mperms.dcache {
delete(c.mperms.dcache, subject)
if r++; r > pruneSize {
break
}
}
}
// prunePubPermsCache will prune the cache via randomly
// deleting items. Doing so pruneSize items at a time.
func (c *client) prunePubPermsCache() {
r := 0
for subject := range c.perms.pcache {
delete(c.perms.pcache, subject)
if r++; r > pruneSize {
break
}
}
}
// pubAllowed checks on publish permissioning.
// Lock should not be held.
func (c *client) pubAllowed(subject string) bool {
return c.pubAllowedFullCheck(subject, true)
}
// pubAllowedFullCheck checks on all publish permissioning depending
// on the flag for dynamic reply permissions.
func (c *client) pubAllowedFullCheck(subject string, fullCheck bool) bool {
if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
return true
}
// Check if published subject is allowed if we have permissions in place.
allowed, ok := c.perms.pcache[subject]
if ok {
return allowed
}
// Cache miss, check allow then deny as needed.
if c.perms.pub.allow != nil {
r := c.perms.pub.allow.Match(subject)
allowed = len(r.psubs) != 0
} else {
// No entries means all are allowed. Deny will overrule as needed.
allowed = true
}
// If we have a deny list and are currently allowed, check that as well.
if allowed && c.perms.pub.deny != nil {
r := c.perms.pub.deny.Match(subject)
allowed = len(r.psubs) == 0
}
// If we are currently not allowed but we are tracking reply subjects
// dynamically, check to see if we are allowed here but avoid pcache.
// We need to acquire the lock though.
if !allowed && fullCheck && c.perms.resp != nil {
c.mu.Lock()
if resp := c.replies[subject]; resp != nil {
resp.n++
// Check if we have sent too many responses.
if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs {
delete(c.replies, subject)
} else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires {
delete(c.replies, subject)
} else {
allowed = true
}
}
c.mu.Unlock()
} else {
// Update our cache here.
c.perms.pcache[string(subject)] = allowed
// Prune if needed.
if len(c.perms.pcache) > maxPermCacheSize {
c.prunePubPermsCache()
}
}
return allowed
}
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
// This function is inlined and checking this way is actually faster
// than byte-by-byte comparison.
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// Test whether a reply subject is a service import or a gateway routed reply.
func isReservedReply(reply []byte) bool {
if isServiceReply(reply) {
return true
}
// Faster to check with string([:]) than byte-by-byte
if len(reply) > gwReplyPrefixLen && string(reply[:gwReplyPrefixLen]) == gwReplyPrefix {
return true
}
return false
}
// This will decide to call the client code or router code.
func (c *client) processInboundMsg(msg []byte) {
switch c.kind {
case CLIENT:
c.processInboundClientMsg(msg)
case ROUTER:
c.processInboundRoutedMsg(msg)
case GATEWAY:
c.processInboundGatewayMsg(msg)
case LEAF:
c.processInboundLeafMsg(msg)
}
}
// selectMappedSubject will choose the mapped subject based on the client's inbound subject.
func (c *client) selectMappedSubject() bool {
nsubj, changed := c.acc.selectMappedSubject(string(c.pa.subject))
if changed {
c.pa.subject = []byte(nsubj)
}
return changed
}
// processInboundClientMsg is called to process an inbound msg from a client.
func (c *client) processInboundClientMsg(msg []byte) bool {
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
c.in.msgs++
c.in.bytes += int32(len(msg) - LEN_CR_LF)
// Check that client (could be here with SYSTEM) is not publishing on reserved "$GNR" prefix.
if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) {
c.pubPermissionViolation(c.pa.subject)
return false
}
// Mostly under testing scenarios.
if c.srv == nil || c.acc == nil {
return false
}
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return false
}
// Now check for reserved replies. These are used for service imports.
if len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) {
c.replySubjectViolation(c.pa.reply)
return false
}
if c.opts.Verbose {
c.sendOK()
}
// If MQTT client, check for retain flag now that we have passed permissions check
if c.isMqtt() {
c.mqttHandlePubRetain()
}
// Check if this client's gateway replies map is not empty
if atomic.LoadInt32(&c.cgwrt) > 0 && c.handleGWReplyMap(msg) {
return true
}
// If we have an exported service and we are doing remote tracking, check this subject
// to see if we need to report the latency.
if c.rrTracking != nil {
c.mu.Lock()
rl := c.rrTracking.rmap[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking.rmap, string(c.pa.subject))
}
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.Status = 200
sl.Responder = c.getClientInfo(true)
sl.ServiceLatency = time.Since(sl.RequestStart) - sl.Responder.RTT
sl.TotalLatency = sl.ServiceLatency + sl.Responder.RTT
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, rl) // Send to SYS account
}
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
// Indication if we attempted to deliver the message to anyone.
var didDeliver bool
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If there are matching queue subs and we are in gateway mode,
// we need to keep track of the queue names the messages are
// delivered to. When sending to the GWs, the RMSG will include
// those names so that the remote clusters do not deliver messages
// to their queue subs of the same names.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
didDeliver, qnames = c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
didDeliver = c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, qnames) || didDeliver
}
// Check to see if we did not deliver to anyone and the client has a reply subject set
// and wants notification of no_responders.
if !didDeliver && len(c.pa.reply) > 0 {
c.mu.Lock()
if c.opts.NoResponders {
if sub := c.subForReply(c.pa.reply); sub != nil {
proto := fmt.Sprintf("HMSG %s %s 16 16\r\nNATS/1.0 503\r\n\r\n\r\n", c.pa.reply, sub.sid)
c.queueOutbound([]byte(proto))
c.addToPCD(c)
}
}
c.mu.Unlock()
}
return didDeliver
}
// Return the subscription for this reply subject. Only look at normal subs for this client.
func (c *client) subForReply(reply []byte) *subscription {
r := c.acc.sl.Match(string(reply))
for _, sub := range r.psubs {
if sub.client == c {
return sub
}
}
return nil
}
// This is invoked knowing that this client has some GW replies
// in its map. It will check if one is found for the c.pa.subject
// and if so will process it directly (send to GWs and LEAFs) and
// return true to notify the caller that the message was handled.
// If there is no mapping for the subject, false is returned.
func (c *client) handleGWReplyMap(msg []byte) bool {
c.mu.Lock()
rm, ok := c.gwrm[string(c.pa.subject)]
if !ok {
c.mu.Unlock()
return false
}
// Set subject to the mapped reply subject
c.pa.subject = []byte(rm.ms)
var rl *remoteLatency
if c.rrTracking != nil {
rl = c.rrTracking.rmap[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking.rmap, string(c.pa.subject))
}
}
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.Status = 200
sl.Responder = c.getClientInfo(true)
sl.ServiceLatency = time.Since(sl.RequestStart) - sl.Responder.RTT
sl.TotalLatency = sl.ServiceLatency + sl.Responder.RTT
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, rl) // Send to SYS account
}
// Check for leaf nodes
if c.srv.gwLeafSubs.Count() > 0 {
if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 {
c.processMsgResults(c.acc, r, msg, nil, c.pa.subject, c.pa.reply, pmrNoFlag)
}
}
if c.srv.gateway.enabled {
c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, nil)
}
return true
}
// Used to setup the response map for a service import request that has a reply subject.
func (c *client) setupResponseServiceImport(acc *Account, si *serviceImport, tracking bool, header http.Header) *serviceImport {
rsi := si.acc.addRespServiceImport(acc, string(c.pa.reply), si, tracking, header)
if si.latency != nil {
if c.rtt == 0 {
// We have a service import that we are tracking but have not established RTT.
c.sendRTTPing()
}
si.acc.mu.Lock()
rsi.rc = c
si.acc.mu.Unlock()
}
return rsi
}
// This will set a header for the message.
// Lock does not need to be held but this should only be called
// from the inbound go routine. We will update the pubArgs.
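// For a message without an existing header block, the result starts with
// "NATS/1.0\r\n<key>: <value>\r\n\r\n" followed by the original payload, and
// c.pa.hdr/size/hdb/szb are updated to the new lengths.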
func (c *client) setHeader(key, value string, msg []byte) []byte {
const hdrLine = "NATS/1.0\r\n"
var bb bytes.Buffer
var omi int
// Write original header if present.
if c.pa.hdr > LEN_CR_LF {
omi = c.pa.hdr
bb.Write(msg[:c.pa.hdr-LEN_CR_LF])
} else {
bb.WriteString(hdrLine)
}
http.Header{key: []string{value}}.Write(&bb)
bb.WriteString(CR_LF)
nhdr := bb.Len()
// Put the original message back.
bb.Write(msg[omi:])
nsize := bb.Len() - LEN_CR_LF
// Update pubArgs
// If others will use this later we need to save and restore original.
c.pa.hdr = nhdr
c.pa.size = nsize
c.pa.hdb = []byte(strconv.Itoa(nhdr))
c.pa.szb = []byte(strconv.Itoa(nsize))
return bb.Bytes()
}
// processServiceImport is an internal callback when a subscription matches an imported service
// from another account. This includes response mappings as well.
func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) {
// If we are a GW and this is not a direct serviceImport ignore.
isResponse := si.isRespServiceImport()
if c.kind == GATEWAY && !isResponse {
return
}
// If we are here and we are a serviceImport response make sure we are not matching back
// to the import/export pair that started the request. If so ignore.
if isResponse && c.pa.psi != nil && c.pa.psi.se == si.se {
return
}
acc.mu.RLock()
shouldReturn := si.invalid || acc.sl == nil
acc.mu.RUnlock()
if shouldReturn {
return
}
var nrr []byte
var rsi *serviceImport
// Check if there is a reply present and set up a response.
// TODO(dlc) - restrict to configured service imports and not responses?
tracking, headers := shouldSample(si.latency, c)
if len(c.pa.reply) > 0 {
if rsi = c.setupResponseServiceImport(acc, si, tracking, headers); rsi != nil {
nrr = []byte(rsi.from)
}
} else {
// Check to see if this was a bad request with no reply and we were supposed to be tracking.
if !si.response && si.latency != nil && tracking {
si.acc.sendBadRequestTrackingLatency(si, c, headers)
}
}
// Send tracking info here if we are tracking this response.
// This is always a response.
var didSendTL bool
if si.tracking {
// Stamp that we attempted delivery.
si.didDeliver = true
didSendTL = acc.sendTrackingLatency(si, c)
}
// Pick correct to subject. If we matched on a wildcard use the literal publish subject.
to := si.to
if si.tr != nil {
// FIXME(dlc) - This could be slow, may want to look at adding cache to bare transforms?
to, _ = si.tr.transformSubject(string(c.pa.subject))
} else if si.usePub {
to = string(c.pa.subject)
}
// Now check to see if this account has mappings that could affect the service import.
// Can't use non-locked trick like in processInboundClientMsg, so just call into selectMappedSubject
// so we only lock once.
to, _ = si.acc.selectMappedSubject(to)
// Copy our pubArg and account
pacopy := c.pa
oacc := c.acc
// Change this so that we detect recursion
c.pa.psi = si
// Place our client info for the request in the message.
// This will survive going across routes, etc.
if c.pa.proxy == nil && !si.response {
if ci := c.getClientInfo(si.share); ci != nil {
if b, _ := json.Marshal(ci); b != nil {
msg = c.setHeader(ClientInfoHdr, string(b), msg)
}
}
}
// Set our reply.
c.pa.reply = nrr
// For processing properly across routes, etc.
if c.kind == CLIENT || c.kind == LEAF {
c.pa.proxy = c.acc
}
c.mu.Lock()
c.acc = si.acc
c.mu.Unlock()
// FIXME(dlc) - Do L1 cache trick like normal client?
rr := si.acc.sl.Match(to)
// If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we
// need to handle that since the processMsgResults will want a queue filter.
flags := pmrMsgImportedFromService
if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF {
flags |= pmrIgnoreEmptyQueueFilter
}
// We will be calling back into processMsgResults since we are now being called as a normal sub.
// We need to take care of the c.in.rts, so save off what is there and use a local version. We
// will put back what was there after.
orts := c.in.rts
var lrts [routeTargetInit]routeTarget
c.in.rts = lrts[:0]
var didDeliver bool
// If this is not a gateway connection but gateway is enabled,
// try to send this converted message to all gateways.
if c.srv.gateway.enabled {
flags |= pmrCollectQueueNames
var queues [][]byte
didDeliver, queues = c.processMsgResults(si.acc, rr, msg, nil, []byte(to), nrr, flags)
didDeliver = c.sendMsgToGateways(si.acc, msg, []byte(to), nrr, queues) || didDeliver
} else {
didDeliver, _ = c.processMsgResults(si.acc, rr, msg, nil, []byte(to), nrr, flags)
}
// Put what was there back now.
c.in.rts = orts
c.pa = pacopy
c.mu.Lock()
c.acc = oacc
c.mu.Unlock()
// Determine if we should remove this service import. This is for response service imports.
// We will remove if we did not deliver, or if we are a response service import and we are
// a singleton, or we have an EOF message.
shouldRemove := !didDeliver || (si.response && (si.rt == Singleton || len(msg) == LEN_CR_LF))
// If we are tracking and we did not actually send the latency info we need to suppress the removal.
if si.tracking && !didSendTL {
shouldRemove = false
}
// If we are streamed or chunked we need to update our timestamp to avoid cleanup.
if si.rt != Singleton && didDeliver {
acc.mu.Lock()
si.ts = time.Now().UnixNano()
acc.mu.Unlock()
}
// Cleanup of a response service import
if shouldRemove {
reason := rsiOk
if !didDeliver {
reason = rsiNoDelivery
}
if si.isRespServiceImport() {
acc.removeRespServiceImport(si, reason)
} else {
// This is a main import and since we could not even deliver to the exporting account
// go ahead and remove the respServiceImport we created above.
si.acc.removeRespServiceImport(rsi, reason)
}
}
}
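// addSubToRouteTargets records the sub as a route/leaf delivery target,
// merging queue names when the sub's client is already in the target list.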
func (c *client) addSubToRouteTargets(sub *subscription) {
if c.in.rts == nil {
c.in.rts = make([]routeTarget, 0, routeTargetInit)
}
for i := range c.in.rts {
rt := &c.in.rts[i]
if rt.sub.client == sub.client {
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
return
}
}
var rt *routeTarget
lrts := len(c.in.rts)
	// If we are here we do not have the sub yet in our list.
	// If we have to grow do so here.
if lrts == cap(c.in.rts) {
c.in.rts = append(c.in.rts, routeTarget{})
}
c.in.rts = c.in.rts[:lrts+1]
rt = &c.in.rts[lrts]
rt.sub = sub
rt.qs = rt._qs[:0]
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
}
// This processes the sublist results for a given message.
// Returns whether the message was delivered to at least one target, and the queue names delivered to.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, subject, reply []byte, flags int) (bool, [][]byte) {
// For sending messages across routes and leafnodes.
// Reset if we have one since we reuse this data structure.
if c.in.rts != nil {
c.in.rts = c.in.rts[:0]
}
var rplyHasGWPrefix bool
var creply = reply
// If the reply subject is a GW routed reply, we will perform some
// tracking in deliverMsg(). We also want to send to the user the
// reply without the prefix. `creply` will be set to that and be
// used to create the message header for client connections.
if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix {
creply = reply[gwSubjectOffset:]
}
// With JetStream we now have times where we want to match a subscription
// on one subject, but deliver it with another. e.g. JetStream deliverables.
// This only works for last mile, meaning to a client. For other types we need
// to use the original subject.
subj := subject
if len(deliver) > 0 {
subj = deliver
}
// Check for JetStream encoded reply subjects.
// For now these will only be on $JS.ACK prefixed reply subjects.
if len(creply) > 0 &&
c.kind != CLIENT && c.kind != SYSTEM && c.kind != JETSTREAM && c.kind != ACCOUNT &&
bytes.HasPrefix(creply, []byte(jsAckPre)) {
// We need to rewrite the subject and the reply.
if li := bytes.LastIndex(creply, []byte("@")); li != 0 && li < len(creply)-1 {
subj, creply = creply[li+1:], creply[:li]
}
}
var didDeliver bool
// delivery subject for clients
var dsubj []byte
// Used as scratch if mapping
var _dsubj [64]byte
// Loop over all normal subscriptions that match.
for _, sub := range r.psubs {
// Check if this is a send to a ROUTER. We now process
// these after everything else.
switch sub.client.kind {
case ROUTER:
if (c.kind != ROUTER && !c.isSpokeLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) {
c.addSubToRouteTargets(sub)
}
continue
case GATEWAY:
// Never send to gateway from here.
continue
case LEAF:
// We handle similarly to routes and use the same data structures.
// Leaf node delivery audience is different however.
// Also leaf nodes are always no echo, so we make sure we are not
// going to send back to ourselves here.
if c != sub.client && (c.kind != ROUTER || sub.client.isHubLeafNode()) {
c.addSubToRouteTargets(sub)
}
continue
}
// Assume delivery subject is the normal subject to this point.
dsubj = subj
// Check for stream import mapped subs (shadow subs). These apply to local subs only.
if sub.im != nil {
// If this message was a service import do not re-export to an exported stream.
if flags&pmrMsgImportedFromService != 0 {
continue
}
if sub.im.tr != nil {
to, _ := sub.im.tr.transformSubject(string(dsubj))
dsubj = append(_dsubj[:0], to...)
} else if sub.im.usePub {
dsubj = append(_dsubj[:0], subj...)
} else {
dsubj = append(_dsubj[:0], sub.im.to...)
}
// If we are mapping for a deliver subject we will reverse roles.
// The original subj we set from above is correct for the msg header,
// but we need to transform the deliver subject to properly route.
if len(deliver) > 0 {
dsubj, subj = subj, dsubj
}
}
// Remap to the original subject if internal.
if sub.icb != nil {
subj = subject
}
// Normal delivery
mh := c.msgHeader(dsubj, creply, sub)
didDeliver = c.deliverMsg(sub, subj, creply, mh, msg, rplyHasGWPrefix) || didDeliver
}
// Set these up to optionally filter based on the queue lists.
// This is for messages received from routes which will have directed
// guidance on which queue groups we should deliver to.
qf := c.pa.queues
// Declared here because of goto.
var queues [][]byte
// For all routes/leaf/gateway connections, we may still want to send messages to
// leaf nodes or routes even if there are no queue filters since we collect
// them above and do not process inline like normal clients.
// However, do select queue subs if asked to ignore empty queue filter.
if (c.kind == LEAF || c.kind == ROUTER || c.kind == GATEWAY) && qf == nil && flags&pmrIgnoreEmptyQueueFilter == 0 {
goto sendToRoutesOrLeafs
}
// Check to see if we have our own rand yet. Global rand
// has contention with lots of clients, etc.
if c.in.prand == nil {
c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Process queue subs
for i := 0; i < len(r.qsubs); i++ {
qsubs := r.qsubs[i]
		// If we have a filter check that here. We could make this a map or something more
		// complex, but we use a linear search since we expect queues to be small. Should be faster
		// and more cache friendly.
if qf != nil && len(qsubs) > 0 {
tqn := qsubs[0].queue
for _, qn := range qf {
if bytes.Equal(qn, tqn) {
goto selectQSub
}
}
continue
}
selectQSub:
		// We will hold onto remote or leaf qsubs when we are coming from
		// a route or a leaf node just in case we can no longer do local delivery.
var rsub, sub *subscription
var _ql [32]*subscription
src := c.kind
// If we just came from a route we want to prefer local subs.
// So only select from local subs but remember the first rsub
// in case all else fails.
if src == ROUTER {
ql := _ql[:0]
for i := 0; i < len(qsubs); i++ {
sub = qsubs[i]
if sub.client.kind == LEAF || sub.client.kind == ROUTER {
if rsub == nil {
rsub = sub
}
} else {
ql = append(ql, sub)
}
}
qsubs = ql
}
sindex := 0
lqs := len(qsubs)
if lqs > 1 {
sindex = c.in.prand.Int() % lqs
}
// Find a subscription that is able to deliver this message starting at a random index.
for i := 0; i < lqs; i++ {
if sindex+i < lqs {
sub = qsubs[sindex+i]
} else {
sub = qsubs[(sindex+i)%lqs]
}
if sub == nil {
continue
}
// We have taken care of preferring local subs for a message from a route above.
// Here we just care about a client or leaf and skipping a leaf and preferring locals.
if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
if (src == LEAF || src == CLIENT) && dst == LEAF {
if rsub == nil {
rsub = sub
}
continue
} else {
c.addSubToRouteTargets(sub)
// Clear rsub since we added a sub.
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
}
break
}
// Assume delivery subject is normal subject to this point.
dsubj = subj
// Check for stream import mapped subs. These apply to local subs only.
if sub.im != nil {
// If this message was a service import do not re-export to an exported stream.
if flags&pmrMsgImportedFromService != 0 {
continue
}
if sub.im.tr != nil {
to, _ := sub.im.tr.transformSubject(string(subj))
dsubj = append(_dsubj[:0], to...)
} else if sub.im.usePub {
dsubj = append(_dsubj[:0], subj...)
} else {
dsubj = append(_dsubj[:0], sub.im.to...)
}
}
var rreply = reply
if rplyHasGWPrefix && sub.client.kind == CLIENT {
rreply = creply
}
// "rreply" will be stripped of the $GNR prefix (if present)
// for client connections only.
mh := c.msgHeader(dsubj, rreply, sub)
if c.deliverMsg(sub, subject, rreply, mh, msg, rplyHasGWPrefix) {
didDeliver = true
// Clear rsub
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
break
}
}
if rsub != nil {
// If we are here we tried to deliver to a local qsub
// but failed. So we will send it to a remote or leaf node.
c.addSubToRouteTargets(rsub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, rsub.queue)
}
}
}
sendToRoutesOrLeafs:
// If no messages for routes or leafnodes return here.
if len(c.in.rts) == 0 {
return didDeliver, queues
}
// If we do have a deliver subject we need to do something with it.
// Again this is when JetStream (but possibly others) wants the system
// to rewrite the delivered subject. The way we will do that is place it
// at the end of the reply subject if it exists.
if len(deliver) > 0 && len(reply) > 0 {
reply = append(reply, '@')
reply = append(reply, deliver...)
}
// We address by index to avoid struct copy.
// We have inline structs for memory layout and cache coherency.
for i := range c.in.rts {
rt := &c.in.rts[i]
// Check if we have an origin cluster set from a leafnode message.
// If so make sure we do not send it back to the same cluster for a different
// leafnode. Cluster wide no echo.
if rt.sub.client.kind == LEAF {
// Check two scenarios. One is inbound from a route (c.pa.origin)
if c.kind == ROUTER && len(c.pa.origin) > 0 {
if string(c.pa.origin) == rt.sub.client.remoteCluster() {
continue
}
}
// The other is leaf to leaf.
if c.kind == LEAF {
src, dest := c.remoteCluster(), rt.sub.client.remoteCluster()
if src != _EMPTY_ && src == dest {
continue
}
}
}
mh := c.msgHeaderForRouteOrLeaf(subject, reply, rt, acc)
didDeliver = c.deliverMsg(rt.sub, subject, reply, mh, msg, false) || didDeliver
}
return didDeliver, queues
}
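// pubPermissionViolation sends a permissions violation error to the client and logs the event.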
func (c *client) pubPermissionViolation(subject []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject)
}
func (c *client) subPermissionViolation(sub *subscription) {
errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)
logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s",
c.getAuthUser(), sub.subject, sub.sid)
if sub.queue != nil {
errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue)
logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s",
c.getAuthUser(), sub.subject, sub.queue, sub.sid)
}
c.sendErr(errTxt)
c.Errorf(logTxt)
}
func (c *client) replySubjectViolation(reply []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply))
c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply)
}
func (c *client) processPingTimer() {
c.mu.Lock()
c.ping.tmr = nil
// Check if connection is still opened
if c.isClosed() {
c.mu.Unlock()
return
}
c.Debugf("%s Ping Timer", c.typeString())
var sendPing bool
// If we have had activity within the PingInterval then
// there is no need to send a ping. This can be client data
// or if we received a ping from the other side.
pingInterval := c.srv.getOpts().PingInterval
if c.kind == GATEWAY {
pingInterval = adjustPingIntervalForGateway(pingInterval)
sendPing = true
}
now := time.Now()
needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL
// Do not delay PINGs for GATEWAY connections.
if c.kind != GATEWAY {
if delta := now.Sub(c.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second))
} else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second))
} else {
sendPing = true
}
}
if sendPing {
// Check for violation
if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
c.Debugf("Stale Client Connection - Closing")
c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection")))
c.mu.Unlock()
c.closeConnection(StaleConnection)
return
}
// Send PING
c.sendPing()
}
// Reset to fire again.
c.setPingTimer()
c.mu.Unlock()
}
// Returns the smallest value between the given `d` and `gatewayMaxPingInterval` durations.
// Invoked for connections known to be of GATEWAY type.
func adjustPingIntervalForGateway(d time.Duration) time.Duration {
if d > gatewayMaxPingInterval {
return gatewayMaxPingInterval
}
return d
}
// Lock should be held
func (c *client) setPingTimer() {
if c.srv == nil {
return
}
d := c.srv.getOpts().PingInterval
if c.kind == GATEWAY {
d = adjustPingIntervalForGateway(d)
}
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
// Lock should be held
func (c *client) clearPingTimer() {
if c.ping.tmr == nil {
return
}
c.ping.tmr.Stop()
c.ping.tmr = nil
}
// Lock should be held
func (c *client) setAuthTimer(d time.Duration) {
c.atmr = time.AfterFunc(d, c.authTimeout)
}
// Lock should be held
func (c *client) clearAuthTimer() bool {
if c.atmr == nil {
return true
}
stopped := c.atmr.Stop()
c.atmr = nil
return stopped
}
// We may reuse atmr for expiring user jwts,
// so check connectReceived.
// Lock assume held on entry.
func (c *client) awaitingAuth() bool {
return !c.flags.isSet(connectReceived) && c.atmr != nil
}
// This will set the atmr for the JWT expiration time.
// We will lock on entry.
func (c *client) setExpirationTimer(d time.Duration) {
c.mu.Lock()
c.atmr = time.AfterFunc(d, c.authExpired)
c.mu.Unlock()
}
// Possibly flush the connection and then close the low level connection.
// The boolean `minimalFlush` indicates if the flush operation should have a
// minimal write deadline.
// Lock is held on entry.
func (c *client) flushAndClose(minimalFlush bool) {
if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 {
if minimalFlush {
const lowWriteDeadline = 100 * time.Millisecond
// Reduce the write deadline if needed.
if c.out.wdl > lowWriteDeadline {
c.out.wdl = lowWriteDeadline
}
}
c.flushOutbound()
}
c.out.p, c.out.s = nil, nil
// Close the low level connection. The WriteDeadline needs to be set
// in case this is a TLS connection.
if c.nc != nil {
c.nc.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))
c.nc.Close()
c.nc = nil
}
}
func (c *client) typeString() string {
switch c.kind {
case CLIENT:
return "Client"
case ROUTER:
return "Router"
case GATEWAY:
return "Gateway"
case LEAF:
return "Leafnode"
case JETSTREAM:
return "JetStream"
case ACCOUNT:
return "Account"
case SYSTEM:
return "System"
}
return "Unknown Type"
}
// swapAccountAfterReload will check to make sure the bound account for this client
// is current. Under certain circumstances after a reload we could be pointing to
// an older one.
func (c *client) swapAccountAfterReload() {
c.mu.Lock()
defer c.mu.Unlock()
if c.srv == nil {
return
}
acc, _ := c.srv.LookupAccount(c.acc.Name)
c.acc = acc
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and checks for imports (accounts) due to a config reload.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
c.mu.Lock()
var (
checkPerms = c.perms != nil
checkAcc = c.acc != nil
acc = c.acc
)
if !checkPerms && !checkAcc {
c.mu.Unlock()
return
}
var (
_subs [32]*subscription
subs = _subs[:0]
_removed [32]*subscription
removed = _removed[:0]
srv = c.srv
)
if checkAcc {
// We actually only want to check if stream imports have changed.
if _, ok := awcsti[acc.Name]; !ok {
checkAcc = false
}
}
// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
// so we do that here as we collect them. We will check the results down below.
c.mperms = nil
// Collect client's subs under the lock
for _, sub := range c.subs {
// Just checking to rebuild mperms under the lock; we will collect removed subs here though.
// Only collect into the subs array when canSubscribe and checkAcc are true.
canSub := c.canSubscribe(string(sub.subject))
canQSub := sub.queue != nil && c.canQueueSubscribe(string(sub.subject), string(sub.queue))
if !canSub && !canQSub {
removed = append(removed, sub)
} else if checkAcc {
subs = append(subs, sub)
}
}
c.mu.Unlock()
// This list is all subs who are allowed and we need to check accounts.
for _, sub := range subs {
c.mu.Lock()
oldShadows := sub.shadow
sub.shadow = nil
c.mu.Unlock()
c.addShadowSubscriptions(acc, sub)
for _, nsub := range oldShadows {
nsub.im.acc.sl.Remove(nsub)
}
}
// Unsubscribe all that need to be removed and report back to client and logs.
for _, sub := range removed {
c.unsubscribe(acc, sub, true, true)
c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
sub.subject, sub.sid))
srv.Noticef("Removed sub %q (sid %q) for %s - not authorized",
sub.subject, sub.sid, c.getAuthUser())
}
}
// Allows us to count up all the queue subscribers during close.
type qsub struct {
sub *subscription
n int32
}
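// closeConnection marks the connection closed, clears timers and subscriptions,
// updates interest on routes, gateways and leafnodes, and may trigger a reconnect.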
func (c *client) closeConnection(reason ClosedState) {
c.mu.Lock()
if c.flags.isSet(closeConnection) {
c.mu.Unlock()
return
}
// Note that we may have markConnAsClosed() invoked before closeConnection(),
// so don't set this to 1, instead bump the count.
c.rref++
c.flags.set(closeConnection)
c.clearAuthTimer()
c.clearPingTimer()
c.markConnAsClosed(reason)
// Unblock anyone who is potentially stalled waiting on us.
if c.out.stc != nil {
close(c.out.stc)
c.out.stc = nil
}
var (
connectURLs []string
wsConnectURLs []string
kind = c.kind
srv = c.srv
noReconnect = c.flags.isSet(noReconnect)
acc = c.acc
)
// Snapshot for use if we are a client connection.
// FIXME(dlc) - we can just stub in a new one for client
// and reference existing one.
var subs []*subscription
if kind == CLIENT || kind == LEAF || kind == JETSTREAM {
var _subs [32]*subscription
subs = _subs[:0]
for _, sub := range c.subs {
// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
sub.max = 0
sub.close()
subs = append(subs, sub)
}
}
if c.route != nil {
connectURLs = c.route.connectURLs
wsConnectURLs = c.route.wsConnURLs
}
// If we have remote latency tracking running shut that down.
if c.rrTracking != nil {
c.rrTracking.ptmr.Stop()
c.rrTracking = nil
}
c.mu.Unlock()
// Remove client's or leaf node or jetstream subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF || kind == JETSTREAM) {
acc.sl.RemoveBatch(subs)
} else if kind == ROUTER {
go c.removeRemoteSubs()
}
if srv != nil {
// If this is a route that disconnected, possibly send an INFO with
// the updated list of connect URLs to clients that know how to
// handle async INFOs.
if (len(connectURLs) > 0 || len(wsConnectURLs) > 0) && !srv.getOpts().Cluster.NoAdvertise {
srv.removeConnectURLsAndSendINFOToClients(connectURLs, wsConnectURLs)
}
// Unregister
srv.removeClient(c)
// Update remote subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF) {
qsubs := map[string]*qsub{}
for _, sub := range subs {
// Call unsubscribe here to cleanup shadow subscriptions and such.
c.unsubscribe(acc, sub, true, false)
// Update route as normal for a normal subscriber.
if sub.queue == nil {
srv.updateRouteSubscriptionMap(acc, sub, -1)
srv.updateLeafNodes(acc, sub, -1)
} else {
// We handle queue subscribers specially: in case we
// have a bunch, we can just send one update to the
// connected routes.
key := string(sub.subject) + " " + string(sub.queue)
if esub, ok := qsubs[key]; ok {
esub.n++
} else {
qsubs[key] = &qsub{sub, 1}
}
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
// Process any qsubs here.
for _, esub := range qsubs {
srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
srv.updateLeafNodes(acc, esub.sub, -(esub.n))
}
if prev := acc.removeClient(c); prev == 1 && srv != nil {
srv.decActiveAccounts()
}
}
}
// Don't reconnect connections that have been marked with
// the no reconnect flag.
if noReconnect {
return
}
c.reconnect()
}
// Depending on the kind of connection, this may attempt to recreate a connection.
// The actual reconnect attempt will be started in a go routine.
func (c *client) reconnect() {
var (
retryImplicit bool
gwName string
gwIsOutbound bool
gwCfg *gatewayCfg
)
c.mu.Lock()
// Decrease the ref count and perform the reconnect only if == 0.
c.rref--
if c.flags.isSet(noReconnect) || c.rref > 0 {
c.mu.Unlock()
return
}
if c.route != nil {
retryImplicit = c.route.retry
}
kind := c.kind
if kind == GATEWAY {
gwName = c.gw.name
gwIsOutbound = c.gw.outbound
gwCfg = c.gw.cfg
}
srv := c.srv
c.mu.Unlock()
// Check for a solicited route. If it was, start up a reconnect unless
// we are already connected to the other end.
if c.isSolicitedRoute() || retryImplicit {
// Capture these under lock
c.mu.Lock()
rid := c.route.remoteID
rtype := c.route.routeType
rurl := c.route.url
c.mu.Unlock()
srv.mu.Lock()
defer srv.mu.Unlock()
// It is possible that the server is being shut down.
// If so, don't try to reconnect.
if !srv.running {
return
}
if rid != "" && srv.remotes[rid] != nil {
srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
return
} else if rid == srv.info.ID {
srv.Debugf("Detected route to self, ignoring %q", rurl)
return
} else if rtype != Implicit || retryImplicit {
srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
// Keep track of this go-routine so we can wait for it on
// server shutdown.
srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
}
} else if srv != nil && kind == GATEWAY && gwIsOutbound {
if gwCfg != nil {
srv.Debugf("Attempting reconnect for gateway %q", gwName)
// Run this as a go routine since we may be called within
// the solicitGateway itself if there was an error during
// the creation of the gateway connection.
srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) })
} else {
srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName)
}
} else if c.isSolicitedLeafNode() {
// Check if this is a solicited leaf node. Start up a reconnect.
srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) })
}
}
// Set the noReconnect flag. This is used before a call to closeConnection()
// to prevent the connection from reconnecting (routes, gateways).
func (c *client) setNoReconnect() {
c.mu.Lock()
c.flags.set(noReconnect)
c.mu.Unlock()
}
// Returns the client's RTT value with the protection of the client's lock.
func (c *client) getRTTValue() time.Duration {
c.mu.Lock()
rtt := c.rtt
c.mu.Unlock()
return rtt
}
// This function is used by ROUTER and GATEWAY connections to
// look for a subject on a given account (since these type of
// connections are not bound to a specific account).
// If the c.pa.subject is found in the cache, the cached result
// is returned, otherwise we match the account's sublist and update
// the cache. The cache is pruned if it reaches a certain size.
func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) {
var (
acc *Account
pac *perAccountCache
r *SublistResult
ok bool
)
// Check our cache.
if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok {
// Check the genid to see if it's still valid.
if genid := atomic.LoadUint64(&pac.acc.sl.genid); genid != pac.genid {
ok = false
delete(c.in.pacache, string(c.pa.pacache))
} else {
acc = pac.acc
r = pac.results
}
}
if !ok {
// Match correct account and sublist.
if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil {
return nil, nil
}
// Match against the account sublist.
r = acc.sl.Match(string(c.pa.subject))
// Store in our cache
c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&acc.sl.genid)}
// Check if we need to prune.
if len(c.in.pacache) > maxPerAccountCacheSize {
c.prunePerAccountCache()
}
}
return acc, r
}
// Account will return the associated account for this client.
func (c *client) Account() *Account {
if c == nil {
return nil
}
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
return acc
}
// prunePerAccountCache will prune off a random number of cache entries.
func (c *client) prunePerAccountCache() {
n := 0
for cacheKey := range c.in.pacache {
delete(c.in.pacache, cacheKey)
if n++; n > prunePerAccountCacheSize {
break
}
}
}
// pruneClosedSubFromPerAccountCache removes entries that contain subscriptions
// that have been closed.
func (c *client) pruneClosedSubFromPerAccountCache() {
for cacheKey, pac := range c.in.pacache {
for _, sub := range pac.results.psubs {
if sub.isClosed() {
goto REMOVE
}
}
for _, qsub := range pac.results.qsubs {
for _, sub := range qsub {
if sub.isClosed() {
goto REMOVE
}
}
}
continue
REMOVE:
delete(c.in.pacache, cacheKey)
}
}
// Grabs the information for this client.
func (c *client) getClientInfo(detailed bool) *ClientInfo {
if c == nil || (c.kind != CLIENT && c.kind != LEAF) {
return nil
}
// Server name. Defaults to server ID if not set explicitly.
var sn string
if detailed && c.kind != LEAF {
sn = c.srv.Name()
}
c.mu.Lock()
var ci ClientInfo
// RTT and Account are always added.
ci.Account = accForClient(c)
ci.RTT = c.rtt
// Detailed signals additional opt in.
if detailed {
if c.kind == LEAF {
sn = c.leaf.remoteServer
}
ci.Start = &c.start
ci.Host = c.host
ci.ID = c.cid
ci.Name = c.opts.Name
ci.User = c.getRawAuthUser()
ci.Lang = c.opts.Lang
ci.Version = c.opts.Version
ci.Server = sn
ci.Jwt = c.opts.JWT
ci.IssuerKey = issuerForClient(c)
ci.NameTag = c.nameTag
ci.Tags = c.tags
}
c.mu.Unlock()
return &ci
}
// getRawAuthUser returns the raw auth user for the client.
// Lock should be held.
func (c *client) getRawAuthUser() string {
switch {
case c.opts.Nkey != "":
return c.opts.Nkey
case c.opts.Username != "":
return c.opts.Username
case c.opts.JWT != "":
return c.pubKey
case c.opts.Token != "":
return c.opts.Token
default:
return ""
}
}
// getAuthUser returns the auth user for the client.
// Lock should be held.
func (c *client) getAuthUser() string {
switch {
case c.opts.Nkey != "":
return fmt.Sprintf("Nkey %q", c.opts.Nkey)
case c.opts.Username != "":
return fmt.Sprintf("User %q", c.opts.Username)
case c.opts.JWT != "":
return fmt.Sprintf("JWT User %q", c.pubKey)
default:
return `User "N/A"`
}
}
// Converts the given array of strings to a map.
// The strings are converted to upper-case and added to the map only
// if the server recognizes them as valid connection types.
// If there are unknown connection types, the map of valid ones is returned
// along with an error that contains the names of the unknown ones.
func convertAllowedConnectionTypes(cts []string) (map[string]struct{}, error) {
var unknown []string
m := make(map[string]struct{}, len(cts))
for _, i := range cts {
i = strings.ToUpper(i)
switch i {
case jwt.ConnectionTypeStandard, jwt.ConnectionTypeWebsocket, jwt.ConnectionTypeLeafnode, jwt.ConnectionTypeMqtt:
m[i] = struct{}{}
default:
unknown = append(unknown, i)
}
}
var err error
// We will still return the map of valid ones.
if len(unknown) != 0 {
err = fmt.Errorf("invalid connection types %q", unknown)
}
return m, err
}
// This will return true if the connection is of a type present in the given `acts` map.
// Note that so far this is used only for CLIENT or LEAF connections.
// But a CLIENT can be standard or websocket (and other types in the future).
func (c *client) connectionTypeAllowed(acts map[string]struct{}) bool {
// Empty means all type of clients are allowed
if len(acts) == 0 {
return true
}
var want string
switch c.kind {
case CLIENT:
switch c.clientType() {
case NATS:
want = jwt.ConnectionTypeStandard
case WS:
want = jwt.ConnectionTypeWebsocket
case MQTT:
want = jwt.ConnectionTypeMqtt
}
case LEAF:
want = jwt.ConnectionTypeLeafnode
}
_, ok := acts[want]
return ok
}
// isClosed returns true if either closeConnection or connMarkedClosed
// flag has been set, or if `nc` is nil, which may happen in tests.
func (c *client) isClosed() bool {
return c.flags.isSet(closeConnection) || c.flags.isSet(connMarkedClosed) || c.nc == nil
}
// Logging functionality scoped to a client or route.
func (c *client) Error(err error) {
c.srv.Errors(c, err)
}
func (c *client) Errorf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Errorf(format, v...)
}
func (c *client) Debugf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Debugf(format, v...)
}
func (c *client) Noticef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Noticef(format, v...)
}
func (c *client) Tracef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Tracef(format, v...)
}
func (c *client) Warnf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Warnf(format, v...)
}
| 1 | 12,374 | This is consistent for all clients that we will check yes? Seems like we should just use the singleton, maybe pass it to the parse function or since clients have a server pointer set it at server start and just access that way without locks? | nats-io-nats-server | go |
@@ -62,7 +62,7 @@ public class JavaTokenizer extends JavaCCTokenizer {
if (ignoreLiterals && (javaToken.kind == JavaTokenKinds.STRING_LITERAL
|| javaToken.kind == JavaTokenKinds.CHARACTER_LITERAL
- || javaToken.kind == JavaTokenKinds.DECIMAL_LITERAL
+ || javaToken.kind == JavaTokenKinds.INTEGER_LITERAL
|| javaToken.kind == JavaTokenKinds.FLOATING_POINT_LITERAL)) {
image = String.valueOf(javaToken.kind);
} | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.cpd;
import java.io.IOException;
import java.io.StringReader;
import java.util.Deque;
import java.util.LinkedList;
import java.util.Properties;
import net.sourceforge.pmd.cpd.internal.JavaCCTokenizer;
import net.sourceforge.pmd.cpd.token.JavaCCTokenFilter;
import net.sourceforge.pmd.cpd.token.TokenFilter;
import net.sourceforge.pmd.lang.TokenManager;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.ast.impl.javacc.JavaccToken;
import net.sourceforge.pmd.lang.java.ast.JavaTokenKinds;
import net.sourceforge.pmd.lang.java.ast.JavaTokenManager;
public class JavaTokenizer extends JavaCCTokenizer {
public static final String CPD_START = "\"CPD-START\"";
public static final String CPD_END = "\"CPD-END\"";
private boolean ignoreAnnotations;
private boolean ignoreLiterals;
private boolean ignoreIdentifiers;
private ConstructorDetector constructorDetector;
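// Reads the CPD ignore options (annotations, literals, identifiers) from the given properties; all default to false.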
public void setProperties(Properties properties) {
ignoreAnnotations = Boolean.parseBoolean(properties.getProperty(IGNORE_ANNOTATIONS, "false"));
ignoreLiterals = Boolean.parseBoolean(properties.getProperty(IGNORE_LITERALS, "false"));
ignoreIdentifiers = Boolean.parseBoolean(properties.getProperty(IGNORE_IDENTIFIERS, "false"));
}
@Override
public void tokenize(SourceCode sourceCode, Tokens tokenEntries) throws IOException {
constructorDetector = new ConstructorDetector(ignoreIdentifiers);
super.tokenize(sourceCode, tokenEntries);
}
@Override
protected TokenManager getLexerForSource(SourceCode sourceCode) {
final StringBuilder stringBuilder = sourceCode.getCodeBuffer();
return new JavaTokenManager(new StringReader(stringBuilder.toString()));
}
@Override
protected TokenFilter getTokenFilter(TokenManager tokenManager) {
return new JavaTokenFilter(tokenManager, ignoreAnnotations);
}
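// Builds the CPD token entry for the current token, replacing literal and identifier images
// with their numeric token kind when the corresponding ignore options are enabled.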
@Override
protected TokenEntry processToken(Tokens tokenEntries, GenericToken currentToken, String fileName) {
String image = currentToken.getImage();
JavaccToken javaToken = (JavaccToken) currentToken;
constructorDetector.restoreConstructorToken(tokenEntries, javaToken);
if (ignoreLiterals && (javaToken.kind == JavaTokenKinds.STRING_LITERAL
|| javaToken.kind == JavaTokenKinds.CHARACTER_LITERAL
|| javaToken.kind == JavaTokenKinds.DECIMAL_LITERAL
|| javaToken.kind == JavaTokenKinds.FLOATING_POINT_LITERAL)) {
image = String.valueOf(javaToken.kind);
}
if (ignoreIdentifiers && javaToken.kind == JavaTokenKinds.IDENTIFIER) {
image = String.valueOf(javaToken.kind);
}
constructorDetector.processToken(javaToken);
return new TokenEntry(image, fileName, currentToken.getBeginLine(), currentToken.getBeginColumn(), currentToken.getEndColumn());
}
public void setIgnoreLiterals(boolean ignore) {
this.ignoreLiterals = ignore;
}
public void setIgnoreIdentifiers(boolean ignore) {
this.ignoreIdentifiers = ignore;
}
public void setIgnoreAnnotations(boolean ignoreAnnotations) {
this.ignoreAnnotations = ignoreAnnotations;
}
/**
* The {@link JavaTokenFilter} extends the {@link JavaCCTokenFilter} to discard
* Java-specific tokens.
* <p>
* By default, it discards semicolons, package and import statements, and
* enables annotation-based CPD suppression. Optionally, all annotations can be ignored, too.
* </p>
*/
private static class JavaTokenFilter extends JavaCCTokenFilter {
private boolean isAnnotation = false;
private boolean nextTokenEndsAnnotation = false;
private int annotationStack = 0;
private boolean discardingSemicolon = false;
private boolean discardingKeywords = false;
private boolean discardingSuppressing = false;
private boolean discardingAnnotations = false;
private boolean ignoreAnnotations = false;
JavaTokenFilter(final TokenManager tokenManager, final boolean ignoreAnnotations) {
super(tokenManager);
this.ignoreAnnotations = ignoreAnnotations;
}
@Override
protected void analyzeToken(final GenericToken currentToken) {
JavaccToken token = (JavaccToken) currentToken;
detectAnnotations(token);
skipSemicolon(token);
skipPackageAndImport(token);
skipAnnotationSuppression(token);
if (ignoreAnnotations) {
skipAnnotations();
}
}
private void skipPackageAndImport(final JavaccToken currentToken) {
if (currentToken.kind == JavaTokenKinds.PACKAGE || currentToken.kind == JavaTokenKinds.IMPORT) {
discardingKeywords = true;
} else if (discardingKeywords && currentToken.kind == JavaTokenKinds.SEMICOLON) {
discardingKeywords = false;
}
}
private void skipSemicolon(final JavaccToken currentToken) {
if (currentToken.kind == JavaTokenKinds.SEMICOLON) {
discardingSemicolon = true;
} else if (discardingSemicolon) {
discardingSemicolon = false;
}
}
private void skipAnnotationSuppression(final JavaccToken currentToken) {
// if processing an annotation, look for a CPD-START or CPD-END
if (isAnnotation) {
if (!discardingSuppressing && currentToken.kind == JavaTokenKinds.STRING_LITERAL
&& CPD_START.equals(currentToken.getImage())) {
discardingSuppressing = true;
} else if (discardingSuppressing && currentToken.kind == JavaTokenKinds.STRING_LITERAL
&& CPD_END.equals(currentToken.getImage())) {
discardingSuppressing = false;
}
}
}
private void skipAnnotations() {
if (!discardingAnnotations && isAnnotation) {
discardingAnnotations = true;
} else if (discardingAnnotations && !isAnnotation) {
discardingAnnotations = false;
}
}
@Override
protected boolean isLanguageSpecificDiscarding() {
return discardingSemicolon || discardingKeywords || discardingAnnotations
|| discardingSuppressing;
}
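// Tracks whether we are currently inside an annotation by watching for '@' and balancing
// the parentheses that follow it.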
private void detectAnnotations(JavaccToken currentToken) {
if (isAnnotation && nextTokenEndsAnnotation) {
isAnnotation = false;
nextTokenEndsAnnotation = false;
}
if (isAnnotation) {
if (currentToken.kind == JavaTokenKinds.LPAREN) {
annotationStack++;
} else if (currentToken.kind == JavaTokenKinds.RPAREN) {
annotationStack--;
if (annotationStack == 0) {
nextTokenEndsAnnotation = true;
}
} else if (annotationStack == 0 && currentToken.kind != JavaTokenKinds.IDENTIFIER
&& currentToken.kind != JavaTokenKinds.LPAREN) {
isAnnotation = false;
}
}
if (currentToken.kind == JavaTokenKinds.AT) {
isAnnotation = true;
}
}
}
/**
* The {@link ConstructorDetector} consumes tokens one by one and maintains
* state. It can detect whether the current token belongs to a constructor
* method identifier and, if so, is able to restore it when using
* ignoreIdentifiers.
*/
private static class ConstructorDetector {
private boolean ignoreIdentifiers;
private Deque<TypeDeclaration> classMembersIndentations;
private int currentNestingLevel;
private boolean storeNextIdentifier;
private String prevIdentifier;
ConstructorDetector(boolean ignoreIdentifiers) {
this.ignoreIdentifiers = ignoreIdentifiers;
currentNestingLevel = 0;
classMembersIndentations = new LinkedList<>();
}
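// Updates the detector state for the given token, tracking class/enum declarations and
// brace nesting levels. Only active when ignoreIdentifiers is set.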
public void processToken(JavaccToken currentToken) {
if (!ignoreIdentifiers) {
return;
}
switch (currentToken.kind) {
case JavaTokenKinds.IDENTIFIER:
if ("enum".equals(currentToken.getImage())) {
// If declaring an enum, add a new block nesting level at
// which constructors may exist
pushTypeDeclaration();
} else if (storeNextIdentifier) {
classMembersIndentations.peek().name = currentToken.getImage();
storeNextIdentifier = false;
}
// Store this token
prevIdentifier = currentToken.getImage();
break;
case JavaTokenKinds.CLASS:
// If declaring a class, add a new block nesting level at which
// constructors may exist
pushTypeDeclaration();
break;
case JavaTokenKinds.LBRACE:
currentNestingLevel++;
break;
case JavaTokenKinds.RBRACE:
// Discard completed blocks
if (!classMembersIndentations.isEmpty()
&& classMembersIndentations.peek().indentationLevel == currentNestingLevel) {
classMembersIndentations.pop();
}
currentNestingLevel--;
break;
default:
/*
* Did we find a "class" token not followed by an identifier? i.e:
* expectThrows(IllegalStateException.class, () -> {
* newSearcher(r).search(parentQuery.build(), c);
* });
*/
if (storeNextIdentifier) {
classMembersIndentations.pop();
storeNextIdentifier = false;
}
break;
}
}
private void pushTypeDeclaration() {
TypeDeclaration cd = new TypeDeclaration(currentNestingLevel + 1);
classMembersIndentations.push(cd);
storeNextIdentifier = true;
}
public void restoreConstructorToken(Tokens tokenEntries, JavaccToken currentToken) {
if (!ignoreIdentifiers) {
return;
}
if (currentToken.kind == JavaTokenKinds.LPAREN) {
// was the previous token a constructor? If so, restore the
// identifier
if (!classMembersIndentations.isEmpty()
&& classMembersIndentations.peek().name.equals(prevIdentifier)) {
int lastTokenIndex = tokenEntries.size() - 1;
TokenEntry lastToken = tokenEntries.getTokens().get(lastTokenIndex);
lastToken.setImage(prevIdentifier);
}
}
}
}
private static class TypeDeclaration {
int indentationLevel;
String name;
TypeDeclaration(int indentationLevel) {
this.indentationLevel = indentationLevel;
}
}
}
| 1 | 16,934 | Note that this is a bug, that should be fixed on master. `DECIMAL_LITERAL` cannot match any token, because it's declared with a `#`. | pmd-pmd | java |
@@ -26,6 +26,8 @@ import (
"sync/atomic"
"time"
+ "github.com/nats-io/nuid"
+
"github.com/nats-io/nats-server/v2/server/pse"
)
| 1 | // Copyright 2018-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nats-server/v2/server/pse"
)
const (
connectEventSubj = "$SYS.ACCOUNT.%s.CONNECT"
disconnectEventSubj = "$SYS.ACCOUNT.%s.DISCONNECT"
accConnsReqSubj = "$SYS.REQ.ACCOUNT.%s.CONNS"
accUpdateEventSubj = "$SYS.ACCOUNT.%s.CLAIMS.UPDATE"
connsRespSubj = "$SYS._INBOX_.%s"
accConnsEventSubj = "$SYS.SERVER.ACCOUNT.%s.CONNS"
shutdownEventSubj = "$SYS.SERVER.%s.SHUTDOWN"
authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR"
serverStatsSubj = "$SYS.SERVER.%s.STATSZ"
serverStatsReqSubj = "$SYS.REQ.SERVER.%s.STATSZ"
serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING"
leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT"
remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s"
inboxRespSubj = "$SYS._INBOX.%s.%s"
// FIXME(dlc) - Should account scope, even with wc for now, but later on
// we can then shard as needed.
accNumSubsReqSubj = "$SYS.REQ.ACCOUNT.NSUBS"
// These are for exported debug services. These are local to this server only.
accSubsSubj = "$SYS.DEBUG.SUBSCRIBERS"
shutdownEventTokens = 4
serverSubjectIndex = 2
accUpdateTokens = 5
accUpdateAccIndex = 2
)
// FIXME(dlc) - make configurable.
var eventsHBInterval = 30 * time.Second
// Used to send and receive messages from inside the server.
type internal struct {
account *Account
client *client
seq uint64
sid int
servers map[string]*serverUpdate
sweeper *time.Timer
stmr *time.Timer
replies map[string]msgHandler
sendq chan *pubMsg
wg sync.WaitGroup
orphMax time.Duration
chkOrph time.Duration
statsz time.Duration
shash string
inboxPre string
}
// ServerStatsMsg is sent periodically with stats updates.
type ServerStatsMsg struct {
Server ServerInfo `json:"server"`
Stats ServerStats `json:"statsz"`
}
// ConnectEventMsg is sent when a new connection is made that is part of an account.
type ConnectEventMsg struct {
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
}
// DisconnectEventMsg is sent when a new connection previously defined from a
// ConnectEventMsg is closed.
type DisconnectEventMsg struct {
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Reason string `json:"reason"`
}
// AccountNumConns is an event that will be sent from a server that is tracking
// a given account when the number of connections changes. It will also send
// heartbeat updates in the absence of any changes.
type AccountNumConns struct {
Server ServerInfo `json:"server"`
Account string `json:"acc"`
Conns int `json:"conns"`
LeafNodes int `json:"leafnodes"`
TotalConns int `json:"total_conns"`
}
// accNumConnsReq is sent when we are starting to track an account for the first
// time. We will request others send info to us about their local state.
type accNumConnsReq struct {
Server ServerInfo `json:"server"`
Account string `json:"acc"`
}
// ServerInfo identifies remote servers.
type ServerInfo struct {
Name string `json:"name"`
Host string `json:"host"`
ID string `json:"id"`
Cluster string `json:"cluster,omitempty"`
Version string `json:"ver"`
Seq uint64 `json:"seq"`
JetStream bool `json:"jetstream"`
Time time.Time `json:"time"`
}
// ClientInfo is detailed information about the client forming a connection.
type ClientInfo struct {
Start time.Time `json:"start,omitempty"`
Host string `json:"host,omitempty"`
ID uint64 `json:"id"`
Account string `json:"acc"`
User string `json:"user,omitempty"`
Name string `json:"name,omitempty"`
Lang string `json:"lang,omitempty"`
Version string `json:"ver,omitempty"`
RTT string `json:"rtt,omitempty"`
Stop *time.Time `json:"stop,omitempty"`
}
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
Start time.Time `json:"start"`
Mem int64 `json:"mem"`
Cores int `json:"cores"`
CPU float64 `json:"cpu"`
Connections int `json:"connections"`
TotalConnections uint64 `json:"total_connections"`
ActiveAccounts int `json:"active_accounts"`
NumSubs uint32 `json:"subscriptions"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
SlowConsumers int64 `json:"slow_consumers"`
Routes []*RouteStat `json:"routes,omitempty"`
Gateways []*GatewayStat `json:"gateways,omitempty"`
}
// RouteStat holds route statistics.
type RouteStat struct {
ID uint64 `json:"rid"`
Name string `json:"name,omitempty"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Pending int `json:"pending"`
}
// GatewayStat holds gateway statistics.
type GatewayStat struct {
ID uint64 `json:"gwid"`
Name string `json:"name"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
NumInbound int `json:"inbound_connections"`
}
// DataStats reports how many msgs and bytes. Applicable for both sent and received.
type DataStats struct {
Msgs int64 `json:"msgs"`
Bytes int64 `json:"bytes"`
}
// Used for internally queueing up messages that the server wants to send.
type pubMsg struct {
acc *Account
sub string
rply string
si *ServerInfo
msg interface{}
last bool
}
// Used to track server updates.
type serverUpdate struct {
seq uint64
ltime time.Time
}
// internalSendLoop will be responsible for serializing all messages that
// a server wants to send.
func (s *Server) internalSendLoop(wg *sync.WaitGroup) {
defer wg.Done()
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
c := s.sys.client
sysacc := s.sys.account
sendq := s.sys.sendq
id := s.info.ID
host := s.info.Host
servername := s.info.Name
seqp := &s.sys.seq
js := s.js != nil
var cluster string
if s.gateway.enabled {
cluster = s.getGatewayName()
}
s.mu.Unlock()
// Warn when internal send queue is backed up past 75%
warnThresh := 3 * internalSendQLen / 4
warnFreq := time.Second
last := time.Now().Add(-warnFreq)
for s.eventsRunning() {
// Setup information for next message
if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
s.Warnf("Internal system send queue > 75%%")
last = time.Now()
}
select {
case pm := <-sendq:
if pm.si != nil {
pm.si.Name = servername
pm.si.Host = host
pm.si.Cluster = cluster
pm.si.ID = id
pm.si.Seq = atomic.AddUint64(seqp, 1)
pm.si.Version = VERSION
pm.si.Time = time.Now()
pm.si.JetStream = js
}
var b []byte
if pm.msg != nil {
switch v := pm.msg.(type) {
case string:
b = []byte(v)
case []byte:
b = v
default:
b, _ = json.MarshalIndent(pm.msg, _EMPTY_, " ")
}
}
c.mu.Lock()
// We can have an override for account here.
if pm.acc != nil {
c.acc = pm.acc
} else {
c.acc = sysacc
}
// Prep internal structures needed to send message.
c.pa.subject = []byte(pm.sub)
c.pa.size = len(b)
c.pa.szb = []byte(strconv.FormatInt(int64(len(b)), 10))
c.pa.reply = []byte(pm.rply)
trace := c.trace
c.mu.Unlock()
// Add in NL
b = append(b, _CRLF_...)
if trace {
c.traceInOp(fmt.Sprintf("PUB %s %s %d",
c.pa.subject, c.pa.reply, c.pa.size), nil)
c.traceMsg(b)
}
c.processInboundClientMsg(b)
// See if we are doing graceful shutdown.
if !pm.last {
c.flushClients(0) // Never spend time in place.
} else {
// For the Shutdown event, we need to send in place otherwise
// there is a chance that the process will exit before the
// writeLoop has a chance to send it.
c.flushClients(time.Second)
return
}
case <-s.quitCh:
return
}
}
}
// Will send a shutdown message.
func (s *Server) sendShutdownEvent() {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
subj := fmt.Sprintf(shutdownEventSubj, s.info.ID)
sendq := s.sys.sendq
// Stop any more messages from queueing up.
s.sys.sendq = nil
// Unhook all msgHandlers. Normal client cleanup will deal with subs, etc.
s.sys.replies = nil
s.mu.Unlock()
// Send to the internal queue and mark as last.
sendq <- &pubMsg{nil, subj, _EMPTY_, nil, nil, true}
}
// Used to send an internal message to an arbitrary account.
func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg interface{}) error {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return ErrNoSysAccount
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{a, subject, "", nil, msg, false}
return nil
}
// This will queue up a message to be sent.
// Lock should not be held.
func (s *Server) sendInternalMsgLocked(sub, rply string, si *ServerInfo, msg interface{}) {
s.mu.Lock()
s.sendInternalMsg(sub, rply, si, msg)
s.mu.Unlock()
}
// This will queue up a message to be sent.
// Assumes lock is held on entry.
func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
if s.sys == nil || s.sys.sendq == nil {
return
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{nil, sub, rply, si, msg, false}
s.mu.Lock()
}
// Locked version of checking if events system running. Also checks server.
func (s *Server) eventsRunning() bool {
s.mu.Lock()
er := s.running && s.eventsEnabled()
s.mu.Unlock()
return er
}
// EventsEnabled will report if the server has internal events enabled via
// a defined system account.
func (s *Server) EventsEnabled() bool {
s.mu.Lock()
ee := s.eventsEnabled()
s.mu.Unlock()
return ee
}
// eventsEnabled will report if events are enabled.
// Lock should be held.
func (s *Server) eventsEnabled() bool {
return s.sys != nil && s.sys.client != nil && s.sys.account != nil
}
// TrackedRemoteServers returns how many remote servers we are tracking
// from a system events perspective.
func (s *Server) TrackedRemoteServers() int {
s.mu.Lock()
if !s.running || !s.eventsEnabled() {
s.mu.Unlock()
return -1
}
ns := len(s.sys.servers)
s.mu.Unlock()
return ns
}
// Check for orphan servers who may have gone away without notification.
// This should be wrapChk() to setup common locking.
func (s *Server) checkRemoteServers() {
now := time.Now()
for sid, su := range s.sys.servers {
if now.Sub(su.ltime) > s.sys.orphMax {
s.Debugf("Detected orphan remote server: %q", sid)
// Simulate it going away.
s.processRemoteServerShutdown(sid)
delete(s.sys.servers, sid)
}
}
if s.sys.sweeper != nil {
s.sys.sweeper.Reset(s.sys.chkOrph)
}
}
// Grab RSS and PCPU
func updateServerUsage(v *ServerStats) {
var rss, vss int64
var pcpu float64
pse.ProcUsage(&pcpu, &rss, &vss)
v.Mem = rss
v.CPU = pcpu
v.Cores = numCores
}
// Generate a route stat for our statz update.
func routeStat(r *client) *RouteStat {
if r == nil {
return nil
}
r.mu.Lock()
rs := &RouteStat{
ID: r.cid,
Sent: DataStats{
Msgs: atomic.LoadInt64(&r.outMsgs),
Bytes: atomic.LoadInt64(&r.outBytes),
},
Received: DataStats{
Msgs: atomic.LoadInt64(&r.inMsgs),
Bytes: atomic.LoadInt64(&r.inBytes),
},
Pending: int(r.out.pb),
}
if r.route != nil {
rs.Name = r.route.remoteName
}
r.mu.Unlock()
return rs
}
// Actual send method for statz updates.
// Lock should be held.
func (s *Server) sendStatsz(subj string) {
m := ServerStatsMsg{}
updateServerUsage(&m.Stats)
m.Stats.Start = s.start
m.Stats.Connections = len(s.clients)
m.Stats.TotalConnections = s.totalClients
m.Stats.ActiveAccounts = int(atomic.LoadInt32(&s.activeAccounts))
m.Stats.Received.Msgs = atomic.LoadInt64(&s.inMsgs)
m.Stats.Received.Bytes = atomic.LoadInt64(&s.inBytes)
m.Stats.Sent.Msgs = atomic.LoadInt64(&s.outMsgs)
m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes)
m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
m.Stats.NumSubs = s.numSubscriptions()
for _, r := range s.routes {
m.Stats.Routes = append(m.Stats.Routes, routeStat(r))
}
if s.gateway.enabled {
gw := s.gateway
gw.RLock()
for name, c := range gw.out {
gs := &GatewayStat{Name: name}
c.mu.Lock()
gs.ID = c.cid
gs.Sent = DataStats{
Msgs: atomic.LoadInt64(&c.outMsgs),
Bytes: atomic.LoadInt64(&c.outBytes),
}
c.mu.Unlock()
// Gather matching inbound connections
gs.Received = DataStats{}
for _, c := range gw.in {
c.mu.Lock()
if c.gw.name == name {
gs.Received.Msgs += atomic.LoadInt64(&c.inMsgs)
gs.Received.Bytes += atomic.LoadInt64(&c.inBytes)
gs.NumInbound++
}
c.mu.Unlock()
}
m.Stats.Gateways = append(m.Stats.Gateways, gs)
}
gw.RUnlock()
}
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
}
// Send out our statz update.
// This should be wrapChk() to setup common locking.
func (s *Server) heartbeatStatsz() {
if s.sys.stmr != nil {
s.sys.stmr.Reset(s.sys.statsz)
}
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
// This should be wrapChk() to setup common locking.
func (s *Server) startStatszTimer() {
s.sys.stmr = time.AfterFunc(s.sys.statsz, s.wrapChk(s.heartbeatStatsz))
}
// Start a ticker that will fire periodically and check for orphaned servers.
// This should be wrapChk() to setup common locking.
func (s *Server) startRemoteServerSweepTimer() {
s.sys.sweeper = time.AfterFunc(s.sys.chkOrph, s.wrapChk(s.checkRemoteServers))
}
// Length of our system hash used for server targeted messages.
const sysHashLen = 6
// This will setup our system wide tracking subs.
// For now we will setup one wildcard subscription to
// monitor all accounts for changes in number of connections.
// We can make this on a per account tracking basis if needed.
// Tradeoff is subscription and interest graph events vs connect and
// disconnect events, etc.
func (s *Server) initEventTracking() {
if !s.eventsEnabled() {
return
}
// Create a system hash which we use for other servers to target us specifically.
sha := sha256.New()
sha.Write([]byte(s.info.ID))
s.sys.shash = base64.RawURLEncoding.EncodeToString(sha.Sum(nil))[:sysHashLen]
// This will be for all inbox responses.
subject := fmt.Sprintf(inboxRespSubj, s.sys.shash, "*")
if _, err := s.sysSubscribe(subject, s.inboxReply); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
s.sys.inboxPre = subject
// This is for remote updates for connection accounting.
subject = fmt.Sprintf(accConnsEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// This will be for responses for account info that we send out.
subject = fmt.Sprintf(connsRespSubj, s.info.ID)
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for broad requests to respond with account info.
subject = fmt.Sprintf(accConnsReqSubj, "*")
if _, err := s.sysSubscribe(subject, s.connsRequest); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for broad requests to respond with number of subscriptions for a given subject.
if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for all server shutdowns.
subject = fmt.Sprintf(shutdownEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for account claims updates.
subject = fmt.Sprintf(accUpdateEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.accountClaimUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for requests for our statsz.
subject = fmt.Sprintf(serverStatsReqSubj, s.info.ID)
if _, err := s.sysSubscribe(subject, s.statszReq); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for ping messages that will be sent to all servers for statsz.
if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for updates when leaf nodes connect for a given account. This will
// force any gateway connections to move to `modeInterestOnly`
subject = fmt.Sprintf(leafNodeConnectEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// For tracking remote latency measurements.
subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash)
if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil {
s.Errorf("Error setting up internal latency tracking: %v", err)
}
// These are for system account exports for debugging from client applications.
sacc := s.sys.account
// This is for simple debugging of number of subscribers that exist in the system.
if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil {
s.Errorf("Error setting up internal debug service for subscribers: %v", err)
}
if err := sacc.AddServiceExport(accSubsSubj, nil); err != nil {
s.Errorf("Error adding system service export for %q: %v", accSubsSubj, err)
}
}
// accountClaimUpdate will receive claim updates for accounts.
func (s *Server) accountClaimUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.EventsEnabled() {
return
}
toks := strings.Split(subject, tsep)
if len(toks) < accUpdateTokens {
s.Debugf("Received account claims update on bad subject %q", subject)
return
}
if v, ok := s.accounts.Load(toks[accUpdateAccIndex]); ok {
s.updateAccountWithClaimJWT(v.(*Account), string(msg))
}
}
// processRemoteServerShutdown will update any affected accounts.
// Will update the remote count for clients.
// Lock is assumed held.
func (s *Server) processRemoteServerShutdown(sid string) {
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).removeRemoteServer(sid)
return true
})
}
// remoteServerShutdown is called when we get an event from another server shutting down.
func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, reply string, msg []byte) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() {
return
}
toks := strings.Split(subject, tsep)
if len(toks) < shutdownEventTokens {
s.Debugf("Received remote server shutdown on bad subject %q", subject)
return
}
sid := toks[serverSubjectIndex]
su := s.sys.servers[sid]
if su != nil {
s.processRemoteServerShutdown(sid)
}
}
// updateRemoteServer is called when we have an update from a remote server.
// This allows us to track remote servers, respond to shutdown messages properly,
// make sure that messages are ordered, and allow us to prune dead servers.
// Lock should be held upon entry.
func (s *Server) updateRemoteServer(ms *ServerInfo) {
su := s.sys.servers[ms.ID]
if su == nil {
s.sys.servers[ms.ID] = &serverUpdate{ms.Seq, time.Now()}
s.processNewServer(ms)
} else {
// Should always be going up.
if ms.Seq <= su.seq {
s.Errorf("Received out of order remote server update from: %q", ms.ID)
return
}
su.seq = ms.Seq
su.ltime = time.Now()
}
}
// processNewServer will hold any logic we want to use when we discover a new server.
// Lock should be held upon entry.
func (s *Server) processNewServer(ms *ServerInfo) {
// Right now we only check if we have leafnode servers and if so send another
// connect update to make sure they switch this account to interest only mode.
s.ensureGWsInterestOnlyForLeafNodes()
}
// If GW is enabled on this server and there are any leaf node connections,
// this function will send a LeafNode connect system event to the super cluster
// to ensure that the GWs are in interest-only mode for this account.
// Lock should be held upon entry.
// TODO(dlc) - this will cause this account to be loaded on all servers. Need a better
// way with GW2.
func (s *Server) ensureGWsInterestOnlyForLeafNodes() {
if !s.gateway.enabled || len(s.leafs) == 0 {
return
}
sent := make(map[*Account]bool, len(s.leafs))
for _, c := range s.leafs {
if !sent[c.acc] {
s.sendLeafNodeConnectMsg(c.acc.Name)
sent[c.acc] = true
}
}
}
// shutdownEventing will clean up all eventing state.
func (s *Server) shutdownEventing() {
if !s.eventsRunning() {
return
}
s.mu.Lock()
clearTimer(&s.sys.sweeper)
clearTimer(&s.sys.stmr)
s.mu.Unlock()
// We will queue up a shutdown event and wait for the
// internal send loop to exit.
s.sendShutdownEvent()
s.sys.wg.Wait()
s.mu.Lock()
defer s.mu.Unlock()
// Whip through all accounts.
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).clearEventing()
return true
})
// Turn everything off here.
s.sys = nil
}
// Request for our local connection count.
func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := accNumConnsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
// Here we really only want to look up the account if it is local. We do not want to fetch this
// account if we have no interest in it.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
if acc == nil {
return
}
// We know this is a local connection.
if nlc := acc.NumLocalConnections(); nlc > 0 {
s.mu.Lock()
s.sendAccConnsUpdate(acc, reply)
s.mu.Unlock()
}
}
// leafNodeConnected is an event we will receive when a leaf node for a given account connects.
func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply string, msg []byte) {
m := accNumConnsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
s.mu.Lock()
na := m.Account == "" || !s.eventsEnabled() || !s.gateway.enabled
s.mu.Unlock()
if na {
return
}
if acc, _ := s.lookupAccount(m.Account); acc != nil {
s.switchAccountToInterestMode(acc.Name)
}
}
// statszReq is a request for us to respond with current statz.
func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, msg []byte) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() || reply == _EMPTY_ {
return
}
s.sendStatsz(reply)
}
// remoteConnsUpdate gets called when we receive a remote update from another server.
func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := AccountNumConns{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connection event message: %v", err)
return
}
// See if we have the account registered, if not drop it.
// Make sure this does not force us to load this account here.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
// Silently ignore these if we do not have local interest in the account.
if acc == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
// check again here if we have been shutdown.
if !s.running || !s.eventsEnabled() {
return
}
// Double check that this is not us, should never happen, so error if it does.
if m.Server.ID == s.info.ID {
s.sys.client.Errorf("Processing our own account connection event message: ignored")
return
}
// If we are here we have interest in tracking this account. Update our accounting.
acc.updateRemoteServer(&m)
s.updateRemoteServer(&m.Server)
}
// Setup tracking for this account. This allows us to track global account activity.
// Lock should be held on entry.
func (s *Server) enableAccountTracking(a *Account) {
if a == nil || !s.eventsEnabled() {
return
}
// TODO(ik): Generate payload although message may not be sent.
// May need to ensure we do so only if there is a known interest.
// This can get complicated with gateways.
subj := fmt.Sprintf(accConnsReqSubj, a.Name)
reply := fmt.Sprintf(connsRespSubj, s.info.ID)
m := accNumConnsReq{Account: a.Name}
s.sendInternalMsg(subj, reply, &m.Server, &m)
}
// Event on leaf node connect.
// Lock should NOT be held on entry.
func (s *Server) sendLeafNodeConnect(a *Account) {
s.mu.Lock()
// If we are not in operator mode, or do not have any gateways defined, this should also be a no-op.
if a == nil || !s.eventsEnabled() || !s.gateway.enabled {
s.mu.Unlock()
return
}
s.sendLeafNodeConnectMsg(a.Name)
s.mu.Unlock()
s.switchAccountToInterestMode(a.Name)
}
// Send the leafnode connect message.
// Lock should be held.
func (s *Server) sendLeafNodeConnectMsg(accName string) {
subj := fmt.Sprintf(leafNodeConnectEventSubj, accName)
m := accNumConnsReq{Account: accName}
s.sendInternalMsg(subj, "", &m.Server, &m)
}
// sendAccConnsUpdate is called to send out our information on the
// account's local connections.
// Lock should be held on entry.
func (s *Server) sendAccConnsUpdate(a *Account, subj string) {
if !s.eventsEnabled() || a == nil {
return
}
a.mu.RLock()
// Build event with account name and number of local clients and leafnodes.
m := AccountNumConns{
Account: a.Name,
Conns: a.numLocalConnections(),
LeafNodes: a.numLocalLeafNodes(),
TotalConns: a.numLocalConnections() + a.numLocalLeafNodes(),
}
a.mu.RUnlock()
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
// Set timer to fire again unless we are at zero.
a.mu.Lock()
if a.numLocalConnections() == 0 {
clearTimer(&a.ctmr)
} else {
// Check to see if we have an HB running and update.
if a.ctmr == nil {
a.ctmr = time.AfterFunc(eventsHBInterval, func() { s.accConnsUpdate(a) })
} else {
a.ctmr.Reset(eventsHBInterval)
}
}
a.mu.Unlock()
}
// accConnsUpdate is called whenever there is a change to the account's
// number of active connections, or during a heartbeat.
func (s *Server) accConnsUpdate(a *Account) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() || a == nil {
return
}
subj := fmt.Sprintf(accConnsEventSubj, a.Name)
s.sendAccConnsUpdate(a, subj)
}
// accountConnectEvent will send an account client connect event if there is interest.
// This is a billing event.
func (s *Server) accountConnectEvent(c *client) {
s.mu.Lock()
gacc := s.gacc
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := ConnectEventMsg{
Client: ClientInfo{
Start: c.start,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: nameForClient(c),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
},
}
c.mu.Unlock()
subj := fmt.Sprintf(connectEventSubj, c.acc.Name)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// accountDisconnectEvent will send an account client disconnect event if there is interest.
// This is a billing event.
func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) {
s.mu.Lock()
gacc := s.gacc
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := DisconnectEventMsg{
Client: ClientInfo{
Start: c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: nameForClient(c),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
},
Sent: DataStats{
Msgs: atomic.LoadInt64(&c.inMsgs),
Bytes: atomic.LoadInt64(&c.inBytes),
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: reason,
}
c.mu.Unlock()
subj := fmt.Sprintf(disconnectEventSubj, c.acc.Name)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
func (s *Server) sendAuthErrorEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
s.mu.Unlock()
now := time.Now()
c.mu.Lock()
m := DisconnectEventMsg{
Client: ClientInfo{
Start: c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: nameForClient(c),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
},
Sent: DataStats{
Msgs: c.inMsgs,
Bytes: c.inBytes,
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: AuthenticationViolation.String(),
}
c.mu.Unlock()
s.mu.Lock()
subj := fmt.Sprintf(authErrorEventSubj, s.info.ID)
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
s.mu.Unlock()
}
// Internal message callback. If the msg is needed past the callback it is
// required to be copied.
type msgHandler func(sub *subscription, client *client, subject, reply string, msg []byte)
// Create an internal subscription. No support for queue groups atm.
func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, false, cb)
}
// Create an internal subscription but do not forward interest.
func (s *Server) sysSubscribeInternal(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, true, cb)
}
func (s *Server) systemSubscribe(subject string, internalOnly bool, cb msgHandler) (*subscription, error) {
if !s.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
s.mu.Lock()
c := s.sys.client
trace := c.trace
s.sys.sid++
sid := strconv.Itoa(s.sys.sid)
s.mu.Unlock()
arg := []byte(subject + " " + sid)
if trace {
c.traceInOp("SUB", arg)
}
// Now create the subscription
sub, err := c.processSub(arg, internalOnly)
if err != nil {
return nil, err
}
c.mu.Lock()
sub.icb = cb
c.mu.Unlock()
return sub, nil
}
func (s *Server) sysUnsubscribe(sub *subscription) {
if sub == nil || !s.eventsEnabled() {
return
}
s.mu.Lock()
acc := s.sys.account
c := s.sys.client
s.mu.Unlock()
c.unsubscribe(acc, sub, true, true)
}
// This will generate the tracking subject for remote latency from the response subject.
func remoteLatencySubjectForResponse(subject []byte) string {
if !isTrackedReply(subject) {
return ""
}
toks := bytes.Split(subject, []byte(tsep))
// FIXME(dlc) - Sprintf may become a performance concern at some point.
return fmt.Sprintf(remoteLatencyEventSubj, toks[len(toks)-2])
}
// remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services.
func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ string, msg []byte) {
if !s.eventsRunning() {
return
}
rl := remoteLatency{}
if err := json.Unmarshal(msg, &rl); err != nil {
s.Errorf("Error unmarshalling remote latency measurement: %v", err)
return
}
// Now we need to look up the responseServiceImport associated with this measurement.
acc, err := s.LookupAccount(rl.Account)
if err != nil {
s.Warnf("Could not lookup account %q for latency measurement", rl.Account)
return
}
// Now get the request id / reply. We need to see if we have a GW prefix and if so strip that off.
reply := rl.ReqId
if gwPrefix, old := isGWRoutedSubjectAndIsOldPrefix([]byte(reply)); gwPrefix {
reply = string(getSubjectFromGWRoutedReply([]byte(reply), old))
}
acc.mu.RLock()
si := acc.imports.services[reply]
if si == nil {
acc.mu.RUnlock()
return
}
m1 := si.m1
m2 := rl.M2
lsub := si.latency.subject
acc.mu.RUnlock()
// So we have not processed the response tracking measurement yet.
if m1 == nil {
si.acc.mu.Lock()
// Double check since could have slipped in.
m1 = si.m1
if m1 == nil {
// Store our value there for them to pick up.
si.m1 = &m2
}
si.acc.mu.Unlock()
if m1 == nil {
return
}
}
// Calculate the correct latency given M1 and M2.
// M2 ServiceLatency is correct, so use that.
// M1 TotalLatency is correct, so use that.
// Will use those to back into NATS latency.
m1.merge(&m2)
// Make sure we remove the entry here.
acc.removeServiceImport(si.from)
// Send the metrics
s.sendInternalAccountMsg(acc, lsub, &m1)
}
// This is used for all inbox replies so that we do not send supercluster wide interest
// updates for every request. Same trick used in modern NATS clients.
func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, msg []byte) {
s.mu.Lock()
if !s.eventsEnabled() || s.sys.replies == nil {
s.mu.Unlock()
return
}
cb, ok := s.sys.replies[subject]
s.mu.Unlock()
if ok && cb != nil {
cb(sub, c, subject, reply, msg)
}
}
// Copied from go client.
// We could use serviceReply here instead to save some code.
// I prefer these semantics for the moment, when tracing you know what this is.
const (
InboxPrefix = "$SYS._INBOX."
inboxPrefixLen = len(InboxPrefix)
respInboxPrefixLen = inboxPrefixLen + sysHashLen + 1
replySuffixLen = 8 // Gives us 62^8
)
// Creates an internal inbox used for replies that will be processed by the global wc handler.
func (s *Server) newRespInbox() string {
var b [respInboxPrefixLen + replySuffixLen]byte
pres := b[:respInboxPrefixLen]
copy(pres, s.sys.inboxPre)
rn := rand.Int63()
for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
return string(b[:])
}
// accNumSubsReq is sent when we need to gather remote info on subs.
type accNumSubsReq struct {
Account string `json:"acc"`
Subject string `json:"subject"`
Queue []byte `json:"queue,omitempty"`
}
// Helper function to total subscription counts from sublist results.
func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) {
if rr == nil {
return
}
checkSub := func(sub *subscription) {
// TODO(dlc) - This could be smarter.
if qg != nil && !bytes.Equal(qg, sub.queue) {
return
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
if qg == nil {
for _, sub := range rr.psubs {
checkSub(sub)
}
}
for _, qsub := range rr.qsubs {
for _, sub := range qsub {
checkSub(sub)
}
}
return
}
// Allows users of large systems to debug active subscribers for a given subject.
// Payload should be the subject of interest.
func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply string, msg []byte) {
// Even though this is an internal only subscription, meaning interest was not forwarded, we could
// get one here from a GW in optimistic mode. Ignore for now.
// FIXME(dlc) - Should we send no interest here back to the GW?
if c.kind != CLIENT {
return
}
var nsubs int32
// We could have a single subject or we could have a subject and a wildcard separated by whitespace.
args := strings.Split(strings.TrimSpace(string(msg)), " ")
if len(args) == 0 {
s.sendInternalAccountMsg(c.acc, reply, 0)
return
}
tsubj := args[0]
var qgroup []byte
if len(args) > 1 {
qgroup = []byte(args[1])
}
if subjectIsLiteral(tsubj) {
// We will look up subscribers locally first then determine if we need to solicit other servers.
rr := c.acc.sl.Match(tsubj)
nsubs = totalSubs(rr, qgroup)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
c.acc.sl.All(&subs)
for _, sub := range subs {
if subjectIsSubsetMatch(string(sub.subject), tsubj) {
if qgroup != nil && !bytes.Equal(qgroup, sub.queue) {
continue
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
}
}
// We should have an idea of how many responses to expect from remote servers.
var expected = c.acc.expectedRemoteResponses()
// If we are only local, go ahead and return.
if expected == 0 {
s.sendInternalAccountMsg(c.acc, reply, nsubs)
return
}
// We need to solicit from others.
// To track status.
responses := int32(0)
done := make(chan (bool))
s.mu.Lock()
// Create direct reply inbox that we multiplex under the WC replies.
replySubj := s.newRespInbox()
// Store our handler.
s.sys.replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
if n, err := strconv.Atoi(string(msg)); err == nil {
atomic.AddInt32(&nsubs, int32(n))
}
if atomic.AddInt32(&responses, 1) >= expected {
select {
case done <- true:
default:
}
}
}
// Send the request to the other servers.
request := &accNumSubsReq{
Account: c.acc.Name,
Subject: tsubj,
Queue: qgroup,
}
s.sendInternalMsg(accNumSubsReqSubj, replySubj, nil, request)
s.mu.Unlock()
// FIXME(dlc) - We should rate limit here instead of blind Go routine.
go func() {
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
// Cleanup the WC entry.
s.mu.Lock()
delete(s.sys.replies, replySubj)
s.mu.Unlock()
// Send the response.
s.sendInternalAccountMsg(c.acc, reply, atomic.LoadInt32(&nsubs))
}()
}
// Request for our local subscription count. This will come from a remote origin server
// that received the initial request.
func (s *Server) nsubsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := accNumSubsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account nsubs request message: %v", err)
return
}
// Grab account.
acc, _ := s.lookupAccount(m.Account)
if acc == nil || acc.numLocalAndLeafConnections() == 0 {
return
}
// We will look up subscribers locally first then determine if we need to solicit other servers.
var nsubs int32
if subjectIsLiteral(m.Subject) {
rr := acc.sl.Match(m.Subject)
nsubs = totalSubs(rr, m.Queue)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if (sub.client.kind == CLIENT || sub.client.isHubLeafNode()) && subjectIsSubsetMatch(string(sub.subject), m.Subject) {
if m.Queue != nil && !bytes.Equal(m.Queue, sub.queue) {
continue
}
nsubs++
}
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, nil, nsubs)
}
// Helper to grab name for a client.
func nameForClient(c *client) string {
if c.user != nil {
return c.user.Nkey
}
return "N/A"
}
// Helper to grab account name for a client.
func accForClient(c *client) string {
if c.acc != nil {
return c.acc.Name
}
return "N/A"
}
// Helper to clear timers.
func clearTimer(tp **time.Timer) {
if t := *tp; t != nil {
t.Stop()
*tp = nil
}
}
// Helper function to wrap functions with common test
// to lock server and return if events not enabled.
func (s *Server) wrapChk(f func()) func() {
return func() {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
f()
s.mu.Unlock()
}
}
| 1 | 10,276 | Don't need extra line here. | nats-io-nats-server | go |
@@ -637,9 +637,7 @@ namespace Nethermind.JsonRpc.Test.Modules.Proof
.Op(Instruction.DELEGATECALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
-
- // change in test after the modification to how the ReleaseSpec is delivered to the virtual machine
- Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
+ Assert.AreEqual(3, result.Accounts.Length);
}
[Test] | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Linq;
using Nethermind.Blockchain;
using Nethermind.Blockchain.Find;
using Nethermind.Blockchain.Processing;
using Nethermind.Blockchain.Receipts;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Test.Builders;
using Nethermind.Crypto;
using Nethermind.Db;
using Nethermind.Int256;
using Nethermind.Evm;
using Nethermind.JsonRpc.Data;
using Nethermind.JsonRpc.Modules.Proof;
using Nethermind.Logging;
using Nethermind.Serialization.Json;
using Nethermind.Serialization.Rlp;
using Nethermind.Specs;
using Nethermind.Specs.Forks;
using Nethermind.State;
using Nethermind.State.Proofs;
using Nethermind.Trie.Pruning;
using Nethermind.TxPool;
using NUnit.Framework;
using System.Threading.Tasks;
using Nethermind.Evm.Tracing;
using Nethermind.Facade;
using Nethermind.JsonRpc.Modules;
using NSubstitute;
using NSubstitute.Core.DependencyInjection;
namespace Nethermind.JsonRpc.Test.Modules.Proof
{
[Parallelizable(ParallelScope.None)]
[TestFixture(true, true)]
[TestFixture(true, false)]
[TestFixture(false, false)]
public class ProofRpcModuleTests
{
private readonly bool _createSystemAccount;
private readonly bool _useNonZeroGasPrice;
private IProofRpcModule _proofRpcModule;
private IBlockTree _blockTree;
private IDbProvider _dbProvider;
private TestSpecProvider _specProvider;
public ProofRpcModuleTests(bool createSystemAccount, bool useNonZeroGasPrice)
{
_createSystemAccount = createSystemAccount;
_useNonZeroGasPrice = useNonZeroGasPrice;
}
[SetUp]
public async Task Setup()
{
InMemoryReceiptStorage receiptStorage = new();
_specProvider = new TestSpecProvider(London.Instance);
_blockTree = Build.A.BlockTree().WithTransactions(receiptStorage, _specProvider).OfChainLength(10).TestObject;
_dbProvider = await TestMemDbProvider.InitAsync();
ProofModuleFactory moduleFactory = new(
_dbProvider,
_blockTree,
new TrieStore(_dbProvider.StateDb, LimboLogs.Instance).AsReadOnly(),
new CompositeBlockPreprocessorStep(new RecoverSignatures(new EthereumEcdsa(ChainId.Mainnet, LimboLogs.Instance), NullTxPool.Instance, _specProvider, LimboLogs.Instance)),
receiptStorage,
_specProvider,
LimboLogs.Instance);
_proofRpcModule = moduleFactory.Create();
}
[TestCase(true)]
[TestCase(false)]
public void Can_get_transaction(bool withHeader)
{
Keccak txHash = _blockTree.FindBlock(1).Transactions[0].Hash;
TransactionWithProof txWithProof = _proofRpcModule.proof_getTransactionByHash(txHash, withHeader).Data;
Assert.NotNull(txWithProof.Transaction);
Assert.AreEqual(2, txWithProof.TxProof.Length);
if (withHeader)
{
Assert.NotNull(txWithProof.BlockHeader);
}
else
{
Assert.Null(txWithProof.BlockHeader);
}
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionByHash", $"{txHash}", $"{withHeader}");
Assert.True(response.Contains("\"result\""));
}
[TestCase(true)]
[TestCase(false)]
public void When_getting_non_existing_tx_correct_error_code_is_returned(bool withHeader)
{
Keccak txHash = TestItem.KeccakH;
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionByHash", $"{txHash}", $"{withHeader}");
Assert.True(response.Contains($"{ErrorCodes.ResourceNotFound}"));
}
[TestCase(true)]
[TestCase(false)]
public void When_getting_non_existing_receipt_correct_error_code_is_returned(bool withHeader)
{
Keccak txHash = TestItem.KeccakH;
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionReceipt", $"{txHash}", $"{withHeader}");
Assert.True(response.Contains($"{ErrorCodes.ResourceNotFound}"));
}
[Test]
public void On_incorrect_params_returns_correct_error_code()
{
Keccak txHash = TestItem.KeccakH;
// missing with header
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionReceipt", $"{txHash}");
Assert.True(response.Contains($"{ErrorCodes.InvalidParams}"), "missing");
// too many
response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionReceipt", $"{txHash}", "true", "false");
Assert.True(response.Contains($"{ErrorCodes.InvalidParams}"), "too many");
// missing with header
response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionByHash", $"{txHash}");
Assert.True(response.Contains($"{ErrorCodes.InvalidParams}"), "missing");
// too many
response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionByHash", $"{txHash}", "true", "false");
Assert.True(response.Contains($"{ErrorCodes.InvalidParams}"), "too many");
// all wrong
response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{txHash}");
Assert.True(response.Contains($"{ErrorCodes.InvalidParams}"), "missing");
}
[TestCase(true, "{\"jsonrpc\":\"2.0\",\"result\":{\"receipt\":{\"transactionHash\":\"0xc50c34035d0045dae3d949cb7625eea6c826fb755116ead701de9b8d7edeeb29\",\"transactionIndex\":\"0x0\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"cumulativeGasUsed\":\"0x0\",\"gasUsed\":\"0x0\",\"effectiveGasPrice\":\"0x1\",\"to\":null,\"contractAddress\":null,\"logs\":[],\"logsBloom\":\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"status\":\"0x0\",\"type\":\"0x0\"},\"txProof\":[\"0xf851a011f2d93515d9963f68e6746135f7a786a37ae47ac5b18a5e9fb2e8e9dbf23fad80808080808080a07c3834793d56420b91a53b153d0a67a0ab32cecd250dbc197130eb17e88f32538080808080808080\",\"0xf86530b862f860800182520894000000000000000000000000000000000000000001818024a0b4e030f395ed357d206b58d9a0ded408589a9e26f1a5b41010772cd0d84b8d16a04d9797a972bc308ea635f22455881c41c7c9fb946c93db6f99d2bd529675af13\"],\"receiptProof\":[\"0xf851a053e4a8d7d8438fa45d6b75bbd6fb699b08049c1caf1c21ada42a746ddfb61d0b80808080808080a04de834bd23b53a3d82923ae5f359239b326c66758f2ae636ab934844dba2b9658080808080808080\",\"0xf9010f30b9010bf9010880825208b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0\"],\"blockHeader\":\"0xf901f9a0b3157bcccab04639f6393042690a6c9862deebe88c781f911e8dfd265531e9ffa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5054cffd7f5a0b215b5df35420edb8059cc8585f8201dd31e5e10436437364ca0e1b1585a222beceb3887dc6701802facccf186c2d0f6aa69e26ae0c431fc2b5db9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830f424001833d090080830f424183010203a02ba5557a4c62a513c7e56d1bf13373e0da6bec016755483e91589fe1c6d212e28800000000000003e8\"},\"id\":67}")]
[TestCase(false, "{\"jsonrpc\":\"2.0\",\"result\":{\"receipt\":{\"transactionHash\":\"0xc50c34035d0045dae3d949cb7625eea6c826fb755116ead701de9b8d7edeeb29\",\"transactionIndex\":\"0x0\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"cumulativeGasUsed\":\"0x0\",\"gasUsed\":\"0x0\",\"effectiveGasPrice\":\"0x1\",\"to\":null,\"contractAddress\":null,\"logs\":[],\"logsBloom\":\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"status\":\"0x0\",\"type\":\"0x0\"},\"txProof\":[\"0xf851a011f2d93515d9963f68e6746135f7a786a37ae47ac5b18a5e9fb2e8e9dbf23fad80808080808080a07c3834793d56420b91a53b153d0a67a0ab32cecd250dbc197130eb17e88f32538080808080808080\",\"0xf86530b862f860800182520894000000000000000000000000000000000000000001818024a0b4e030f395ed357d206b58d9a0ded408589a9e26f1a5b41010772cd0d84b8d16a04d9797a972bc308ea635f22455881c41c7c9fb946c93db6f99d2bd529675af13\"],\"receiptProof\":[\"0xf851a053e4a8d7d8438fa45d6b75bbd6fb699b08049c1caf1c21ada42a746ddfb61d0b80808080808080a04de834bd23b53a3d82923ae5f359239b326c66758f2ae636ab934844dba2b9658080808080808080\",\"0xf9010f30b9010bf9010880825208b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0\"]},\"id\":67}")]
public void Can_get_receipt(bool withHeader, string expectedResult)
{
Keccak txHash = _blockTree.FindBlock(1).Transactions[0].Hash;
ReceiptWithProof receiptWithProof = _proofRpcModule.proof_getTransactionReceipt(txHash, withHeader).Data;
Assert.NotNull(receiptWithProof.Receipt);
Assert.AreEqual(2, receiptWithProof.ReceiptProof.Length);
if (withHeader)
{
Assert.NotNull(receiptWithProof.BlockHeader);
}
else
{
Assert.Null(receiptWithProof.BlockHeader);
}
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionReceipt", $"{txHash}", $"{withHeader}");
Assert.AreEqual(expectedResult, response);
}
[TestCase(true, "{\"jsonrpc\":\"2.0\",\"result\":{\"receipt\":{\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"transactionIndex\":\"0x1\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"cumulativeGasUsed\":\"0x7d0\",\"gasUsed\":\"0x3e8\",\"effectiveGasPrice\":\"0x1\",\"from\":\"0x475674cb523a0a2736b7f7534390288fce16982c\",\"to\":\"0x76e68a8696537e4141926f3e528733af9e237d69\",\"contractAddress\":\"0x76e68a8696537e4141926f3e528733af9e237d69\",\"logs\":[{\"removed\":false,\"logIndex\":\"0x2\",\"transactionIndex\":\"0x1\",\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"address\":\"0x0000000000000000000000000000000000000000\",\"data\":\"0x\",\"topics\":[\"0x0000000000000000000000000000000000000000000000000000000000000000\"]},{\"removed\":false,\"logIndex\":\"0x3\",\"transactionIndex\":\"0x1\",\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"address\":\"0x0000000000000000000000000000000000000000\",\"data\":\"0x\",\"topics\":[\"0x0000000000000000000000000000000000000000000000000000000000000000\"]}],\"logsBloom\":\"0x00000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000\",\"status\":\"0x0\",\"type\":\"0x0\"},\"txProof\":[\"0xf851a011f2d93515d9963f68e6746135f7a786a37ae47ac5b18a5e9fb2e8e9dbf23fad80808080808080a07c3834793d56420b91a53b153d0a67a0ab32cecd250dbc197130eb17e88f32538080808080808080\",\"0xf86431b861f85f8001825208940000000000000000000000000000000000000000020123a037d5bf7701bca57284acd641137b3d699b273106b2ad71949e004b78b79b0ccea0571a5f11033f7d825edb7a623556e506cd8f982f47fe5270ce36c601a59690bf\"],\"receiptProof\":[\"0xf851a053e4a8d7d8438fa45d6b75bbd6fb699b08049c1caf1c21ada42a746ddfb61d0b80808080808080a04de834bd23b53a3d82923ae5f359239b326c66758f2ae636ab934844dba2b9658080808080808080\",\"0xf9010f31b9010bf901088082a410b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0\"],\"blockHeader\":\"0xf901f9a0b3157bcccab04639f6393042690a6c9862deebe88c781f911e8dfd265531e9ffa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5054cffd7f5a0b215b5df35420edb8059cc8585f8201dd31e5e10436437364ca0e1b1585a222beceb3887dc6701802facccf186c2d0f6aa69e26ae0c431fc2b5db9010000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830f424001833d090080830f424183010203a02ba5557a4c62a513c7e56d1bf13373e0da6bec016755483e91589fe1c6d212e28800000000000003e8\"},\"id\":67}")]
[TestCase(false, "{\"jsonrpc\":\"2.0\",\"result\":{\"receipt\":{\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"transactionIndex\":\"0x1\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"cumulativeGasUsed\":\"0x7d0\",\"gasUsed\":\"0x3e8\",\"effectiveGasPrice\":\"0x1\",\"from\":\"0x475674cb523a0a2736b7f7534390288fce16982c\",\"to\":\"0x76e68a8696537e4141926f3e528733af9e237d69\",\"contractAddress\":\"0x76e68a8696537e4141926f3e528733af9e237d69\",\"logs\":[{\"removed\":false,\"logIndex\":\"0x2\",\"transactionIndex\":\"0x1\",\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"address\":\"0x0000000000000000000000000000000000000000\",\"data\":\"0x\",\"topics\":[\"0x0000000000000000000000000000000000000000000000000000000000000000\"]},{\"removed\":false,\"logIndex\":\"0x3\",\"transactionIndex\":\"0x1\",\"transactionHash\":\"0x8282a49856d07ccb78ad3a59cde08c882448af58dd6ee5dae93f9480f3a167f2\",\"blockHash\":\"0xb1e7593b3eea16f8caddf3f185858f92f7a9b32db8368821a70a48340479a531\",\"blockNumber\":\"0x1\",\"address\":\"0x0000000000000000000000000000000000000000\",\"data\":\"0x\",\"topics\":[\"0x0000000000000000000000000000000000000000000000000000000000000000\"]}],\"logsBloom\":\"0x00000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000\",\"status\":\"0x0\",\"type\":\"0x0\"},\"txProof\":[\"0xf851a011f2d93515d9963f68e6746135f7a786a37ae47ac5b18a5e9fb2e8e9dbf23fad80808080808080a07c3834793d56420b91a53b153d0a67a0ab32cecd250dbc197130eb17e88f32538080808080808080\",\"0xf86431b861f85f8001825208940000000000000000000000000000000000000000020123a037d5bf7701bca57284acd641137b3d699b273106b2ad71949e004b78b79b0ccea0571a5f11033f7d825edb7a623556e506cd8f982f47fe5270ce36c601a59690bf\"],\"receiptProof\":[\"0xf851a053e4a8d7d8438fa45d6b75bbd6fb699b08049c1caf1c21ada42a746ddfb61d0b80808080808080a04de834bd23b53a3d82923ae5f359239b326c66758f2ae636ab934844dba2b9658080808080808080\",\"0xf9010f31b9010bf901088082a410b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0\"]},\"id\":67}")]
public void Get_receipt_when_block_has_few_receipts(bool withHeader, string expectedResult)
{
IReceiptFinder _receiptFinder = Substitute.For<IReceiptFinder>();
LogEntry[] logEntries = new[] {Build.A.LogEntry.TestObject, Build.A.LogEntry.TestObject};
TxReceipt receipt1 = new TxReceipt()
{
Bloom = new Bloom(logEntries),
Index = 0,
Recipient = TestItem.AddressA,
Sender = TestItem.AddressB,
BlockHash = _blockTree.FindBlock(1).Hash,
BlockNumber = 1,
ContractAddress = TestItem.AddressC,
GasUsed = 1000,
TxHash = _blockTree.FindBlock(1).Transactions[0].Hash,
StatusCode = 0,
GasUsedTotal = 2000,
Logs = logEntries
};
TxReceipt receipt2 = new TxReceipt()
{
Bloom = new Bloom(logEntries),
Index = 1,
Recipient = TestItem.AddressC,
Sender = TestItem.AddressD,
BlockHash = _blockTree.FindBlock(1).Hash,
BlockNumber = 1,
ContractAddress = TestItem.AddressC,
GasUsed = 1000,
TxHash = _blockTree.FindBlock(1).Transactions[1].Hash,
StatusCode = 0,
GasUsedTotal = 2000,
Logs = logEntries
};
Block block = _blockTree.FindBlock(1);
Keccak txHash = _blockTree.FindBlock(1).Transactions[1].Hash;
TxReceipt[] receipts = {receipt1, receipt2};
_receiptFinder.Get(Arg.Any<Block>()).Returns(receipts);
_receiptFinder.Get(Arg.Any<Keccak>()).Returns(receipts);
_receiptFinder.FindBlockHash(Arg.Any<Keccak>()).Returns(_blockTree.FindBlock(1).Hash);
ProofModuleFactory moduleFactory = new ProofModuleFactory(
_dbProvider,
_blockTree,
new TrieStore(_dbProvider.StateDb, LimboLogs.Instance).AsReadOnly(),
new CompositeBlockPreprocessorStep(new RecoverSignatures(new EthereumEcdsa(ChainId.Mainnet, LimboLogs.Instance), NullTxPool.Instance, _specProvider, LimboLogs.Instance)),
_receiptFinder,
_specProvider,
LimboLogs.Instance);
_proofRpcModule = moduleFactory.Create();
ReceiptWithProof receiptWithProof = _proofRpcModule.proof_getTransactionReceipt(txHash, withHeader).Data;
if (withHeader)
{
Assert.NotNull(receiptWithProof.BlockHeader);
}
else
{
Assert.Null(receiptWithProof.BlockHeader);
}
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_getTransactionReceipt", $"{txHash}", $"{withHeader}");
Assert.AreEqual(expectedResult, response);
}
[Test]
public void Can_call()
{
StateProvider stateProvider = CreateInitialState(null);
Keccak root = stateProvider.StateRoot;
Block block = Build.A.Block.WithParent(_blockTree.Head).WithStateRoot(root).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, block);
// would need to setup state root somehow...
TransactionForRpc tx = new()
{
From = TestItem.AddressA,
To = TestItem.AddressB,
GasPrice = _useNonZeroGasPrice ? 10.GWei() : 0
};
_proofRpcModule.proof_call(tx, new BlockParameter(block.Number));
EthereumJsonSerializer serializer = new();
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{block.Number}");
Assert.True(response.Contains("\"result\""));
}
[Test]
public void Can_call_by_hash()
{
StateProvider stateProvider = CreateInitialState(null);
Keccak root = stateProvider.StateRoot;
Block block = Build.A.Block.WithParent(_blockTree.Head).WithStateRoot(root).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, block);
// would need to setup state root somehow...
TransactionForRpc tx = new()
{
From = TestItem.AddressA,
To = TestItem.AddressB,
GasPrice = _useNonZeroGasPrice ? 10.GWei() : 0
};
_proofRpcModule.proof_call(tx, new BlockParameter(block.Hash));
EthereumJsonSerializer serializer = new();
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{block.Hash}");
Assert.True(response.Contains("\"result\""));
}
[Test]
public void Can_call_by_hash_canonical()
{
Block lastHead = _blockTree.Head;
Block block = Build.A.Block.WithParent(lastHead).TestObject;
Block newBlockOnMain = Build.A.Block.WithParent(lastHead).WithDifficulty(block.Difficulty + 1).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, block);
BlockTreeBuilder.AddBlock(_blockTree, newBlockOnMain);
// would need to setup state root somehow...
TransactionForRpc tx = new()
{
From = TestItem.AddressA,
To = TestItem.AddressB,
GasPrice = _useNonZeroGasPrice ? 10.GWei() : 0
};
EthereumJsonSerializer serializer = new();
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{{\"blockHash\" : \"{block.Hash}\", \"requireCanonical\" : true}}");
Assert.True(response.Contains("-32000"));
response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{{\"blockHash\" : \"{TestItem.KeccakG}\", \"requireCanonical\" : true}}");
Assert.True(response.Contains("-32001"));
}
[Test]
public void Can_call_with_block_hashes()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.BlockHeaders.Length);
}
[Test]
public void Can_call_with_many_block_hashes()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.PushData("0x02")
.Op(Instruction.BLOCKHASH)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(3, result.BlockHeaders.Length);
}
[Test]
public void Can_call_with_same_block_hash_many_time()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.BlockHeaders.Length);
}
[Test]
public void Can_call_with_storage_load()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.Op(Instruction.SLOAD)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_many_storage_loads()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.Op(Instruction.SLOAD)
.PushData("0x02")
.Op(Instruction.SLOAD)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_storage_write()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.PushData("0x01")
.Op(Instruction.SSTORE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodecopy()
{
byte[] code = Prepare.EvmCode
.PushData("0x20")
.PushData("0x00")
.PushData("0x00")
.PushData(TestItem.AddressC)
.Op(Instruction.EXTCODECOPY)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodecopy_to_system_account()
{
byte[] code = Prepare.EvmCode
.PushData("0x20")
.PushData("0x00")
.PushData("0x00")
.PushData(Address.SystemUser)
.Op(Instruction.EXTCODECOPY)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodesize()
{
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.EXTCODESIZE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodesize_to_system_account()
{
byte[] code = Prepare.EvmCode
.PushData(Address.SystemUser)
.Op(Instruction.EXTCODESIZE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodehash()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.EXTCODEHASH)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_extcodehash_to_system_account()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(Address.SystemUser)
.Op(Instruction.EXTCODEHASH)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_just_basic_addresses()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.Op(Instruction.STOP)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_balance()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.BALANCE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_self_balance()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.Op(Instruction.SELFBALANCE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_balance_of_system_account()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(Address.SystemUser)
.Op(Instruction.BALANCE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_call_to_system_account_with_zero_value()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(Address.SystemUser)
.PushData(1000000)
.Op(Instruction.CALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_static_call_to_system_account()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(Address.SystemUser)
.PushData(1000000)
.Op(Instruction.STATICCALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_delegate_call_to_system_account()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(Address.SystemUser)
.PushData(1000000)
.Op(Instruction.DELEGATECALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_call_to_system_account_with_non_zero_value()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(1)
.PushData(Address.SystemUser)
.PushData(1000000)
.Op(Instruction.CALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_call_with_zero_value()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(TestItem.AddressC)
.PushData(1000000)
.Op(Instruction.CALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_static_call()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(TestItem.AddressC)
.PushData(1000000)
.Op(Instruction.STATICCALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_delegate_call()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(TestItem.AddressC)
.PushData(1000000)
.Op(Instruction.DELEGATECALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
// change in test after the modification to how the ReleaseSpec is delivered to the virtual machine
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_call_with_non_zero_value()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(0)
.PushData(1)
.PushData(TestItem.AddressC)
.PushData(1000000)
.Op(Instruction.CALL)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_self_destruct()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.SELFDESTRUCT)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_self_destruct_to_system_account()
{
_specProvider.SpecToReturn = MuirGlacier.Instance;
byte[] code = Prepare.EvmCode
.PushData(Address.SystemUser)
.Op(Instruction.SELFDESTRUCT)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(2, result.Accounts.Length);
}
[Test]
public void Can_call_with_many_storage_writes()
{
byte[] code = Prepare.EvmCode
.PushData("0x01")
.PushData("0x01")
.Op(Instruction.SSTORE)
.PushData("0x02")
.PushData("0x02")
.Op(Instruction.SSTORE)
.Done;
CallResultWithProof result = TestCallWithCode(code);
Assert.AreEqual(1 + (_useNonZeroGasPrice ? 1 : 0), result.Accounts.Length);
}
[Test]
public void Can_call_with_mix_of_everything()
{
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.BALANCE)
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.PushData("0x02")
.Op(Instruction.BLOCKHASH)
.PushData("0x01")
.Op(Instruction.SLOAD)
.PushData("0x02")
.Op(Instruction.SLOAD)
.PushData("0x01")
.PushData("0x01")
.Op(Instruction.SSTORE)
.PushData("0x03")
.PushData("0x03")
.Op(Instruction.SSTORE)
.Done;
TestCallWithCode(code);
}
[Test]
public void Can_call_with_mix_of_everything_and_storage()
{
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.BALANCE)
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.PushData("0x02")
.Op(Instruction.BLOCKHASH)
.PushData("0x01")
.Op(Instruction.SLOAD)
.PushData("0x02")
.Op(Instruction.SLOAD)
.PushData("0x01")
.PushData("0x01")
.Op(Instruction.SSTORE)
.PushData("0x03")
.PushData("0x03")
.Op(Instruction.SSTORE)
.Done;
TestCallWithStorageAndCode(code, _useNonZeroGasPrice ? 10.GWei() : 0);
}
[Test]
public void Can_call_with_mix_of_everything_and_storage_from_another_account_wrong_nonce()
{
byte[] code = Prepare.EvmCode
.PushData(TestItem.AddressC)
.Op(Instruction.BALANCE)
.PushData("0x01")
.Op(Instruction.BLOCKHASH)
.PushData("0x02")
.Op(Instruction.BLOCKHASH)
.PushData("0x01")
.Op(Instruction.SLOAD)
.PushData("0x02")
.Op(Instruction.SLOAD)
.PushData("0x01")
.PushData("0x01")
.Op(Instruction.SSTORE)
.PushData("0x03")
.PushData("0x03")
.Op(Instruction.SSTORE)
.Done;
TestCallWithStorageAndCode(code, 0, TestItem.AddressD);
}
private CallResultWithProof TestCallWithCode(byte[] code, Address from = null)
{
StateProvider stateProvider = CreateInitialState(code);
Keccak root = stateProvider.StateRoot;
Block block = Build.A.Block.WithParent(_blockTree.Head).WithStateRoot(root).WithBeneficiary(TestItem.AddressD).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, block);
Block blockOnTop = Build.A.Block.WithParent(block).WithStateRoot(root).WithBeneficiary(TestItem.AddressD).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, blockOnTop);
// would need to setup state root somehow...
TransactionForRpc tx = new()
{
From = from,
To = TestItem.AddressB,
GasPrice = _useNonZeroGasPrice ? 10.GWei() : 0
};
CallResultWithProof callResultWithProof = _proofRpcModule.proof_call(tx, new BlockParameter(blockOnTop.Number)).Data;
Assert.Greater(callResultWithProof.Accounts.Length, 0);
foreach (AccountProof accountProof in callResultWithProof.Accounts)
{
ProofVerifier.Verify(accountProof.Proof, block.StateRoot);
foreach (StorageProof storageProof in accountProof.StorageProofs)
{
ProofVerifier.Verify(storageProof.Proof, accountProof.StorageRoot);
}
}
EthereumJsonSerializer serializer = new();
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{blockOnTop.Number}");
Assert.True(response.Contains("\"result\""));
return callResultWithProof;
}
private void TestCallWithStorageAndCode(byte[] code, UInt256 gasPrice, Address from = null)
{
StateProvider stateProvider = CreateInitialState(code);
StorageProvider storageProvider = new(new TrieStore(_dbProvider.StateDb, LimboLogs.Instance), stateProvider, LimboLogs.Instance);
for (int i = 0; i < 10000; i++)
{
storageProvider.Set(new StorageCell(TestItem.AddressB, (UInt256)i), i.ToBigEndianByteArray());
}
storageProvider.Commit();
storageProvider.CommitTrees(0);
stateProvider.Commit(MainnetSpecProvider.Instance.GenesisSpec, NullStateTracer.Instance);
stateProvider.CommitTree(0);
Keccak root = stateProvider.StateRoot;
Block block = Build.A.Block.WithParent(_blockTree.Head).WithStateRoot(root).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, block);
Block blockOnTop = Build.A.Block.WithParent(block).WithStateRoot(root).TestObject;
BlockTreeBuilder.AddBlock(_blockTree, blockOnTop);
// would need to setup state root somehow...
TransactionForRpc tx = new()
{
// we are testing system transaction here when From is null
From = from,
To = TestItem.AddressB,
GasPrice = gasPrice,
Nonce = 1000
};
CallResultWithProof callResultWithProof = _proofRpcModule.proof_call(tx, new BlockParameter(blockOnTop.Number)).Data;
Assert.Greater(callResultWithProof.Accounts.Length, 0);
// just the keys for debugging
Span<byte> span = stackalloc byte[32];
new UInt256(0).ToBigEndian(span);
Keccak k0 = Keccak.Compute(span);
// just the keys for debugging
new UInt256(1).ToBigEndian(span);
Keccak k1 = Keccak.Compute(span);
// just the keys for debugging
new UInt256(2).ToBigEndian(span);
Keccak k2 = Keccak.Compute(span);
foreach (AccountProof accountProof in callResultWithProof.Accounts)
{
// this is here for diagnostics - so you can read what happens in the test
// generally the account here should be consistent with the values inside the proof
// the exception will be thrown if the account did not exist before the call
Account account;
try
{
account = new AccountDecoder().Decode(new RlpStream(ProofVerifier.Verify(accountProof.Proof, block.StateRoot)));
}
catch (Exception)
{
// ignored
}
foreach (StorageProof storageProof in accountProof.StorageProofs)
{
// we read the values here just to allow easier debugging so you can confirm that the value is same as the one in the proof and in the trie
byte[] value = ProofVerifier.Verify(storageProof.Proof, accountProof.StorageRoot);
}
}
EthereumJsonSerializer serializer = new();
string response = RpcTest.TestSerializedRequest(_proofRpcModule, "proof_call", $"{serializer.Serialize(tx)}", $"{blockOnTop.Number}");
Assert.True(response.Contains("\"result\""));
}
private StateProvider CreateInitialState(byte[] code)
{
StateProvider stateProvider = new(new TrieStore(_dbProvider.StateDb, LimboLogs.Instance), _dbProvider.CodeDb, LimboLogs.Instance);
AddAccount(stateProvider, TestItem.AddressA, 1.Ether());
AddAccount(stateProvider, TestItem.AddressB, 1.Ether());
if (code != null)
{
AddCode(stateProvider, TestItem.AddressB, code);
}
if (_createSystemAccount)
{
AddAccount(stateProvider, Address.SystemUser, 1.Ether());
}
stateProvider.CommitTree(0);
return stateProvider;
}
private void AddAccount(StateProvider stateProvider, Address account, UInt256 initialBalance)
{
stateProvider.CreateAccount(account, initialBalance);
stateProvider.Commit(MuirGlacier.Instance, NullStateTracer.Instance);
}
private void AddCode(StateProvider stateProvider, Address account, byte[] code)
{
Keccak codeHash = stateProvider.UpdateCode(code);
stateProvider.UpdateCodeHash(account, codeHash, MuirGlacier.Instance);
stateProvider.Commit(MainnetSpecProvider.Instance.GenesisSpec, NullStateTracer.Instance);
}
}
}
| 1 | 26,309 | For the first look, this change is strange. Were we passing these tests before? | NethermindEth-nethermind | .cs |
@@ -21,9 +21,16 @@ import (
)
func gracefullyStopProcess(pid int) error {
- cmd := exec.Command("taskkill", "/pid", strconv.Itoa(pid))
+ fmt.Printf("Stop...")
+ // process on windows will not stop unless forced with /f
+ cmd := exec.Command("taskkill", "/pid", strconv.Itoa(pid), "/f")
if err := cmd.Run(); err != nil {
return fmt.Errorf("taskkill: %v", err)
}
return nil
}
+
+func matchProcess(appname string, processname string) bool {
+ // on Windows the process name will include the extension
+ return appname == processname+".exe"
+} | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"fmt"
"os/exec"
"strconv"
)
func gracefullyStopProcess(pid int) error {
cmd := exec.Command("taskkill", "/pid", strconv.Itoa(pid))
if err := cmd.Run(); err != nil {
return fmt.Errorf("taskkill: %v", err)
}
return nil
}
| 1 | 13,341 | On Windows, doesn't `os.Args[0]` include the `.exe`? What if you run the program like `caddy.exe`? | caddyserver-caddy | go |
@@ -112,13 +112,8 @@ type OpenvpnConfigNegotiator struct {
vpnConfig openvpn_service.VPNConfig
}
-// ConsumeConfig doesn't do anything on the openvpn side, since it's not required here
-func (ocn *OpenvpnConfigNegotiator) ConsumeConfig(json.RawMessage) error {
- return nil
-}
-
// ProvideConfig returns the config for user
-func (ocn *OpenvpnConfigNegotiator) ProvideConfig() (session.ServiceConfiguration, error) {
+func (ocn *OpenvpnConfigNegotiator) ProvideConfig(json.RawMessage) (session.ServiceConfiguration, error) {
return &ocn.vpnConfig, nil
}
| 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package service
import (
"encoding/json"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/go-openvpn/openvpn"
"github.com/mysteriumnetwork/go-openvpn/openvpn/middlewares/server/auth"
"github.com/mysteriumnetwork/go-openvpn/openvpn/middlewares/state"
"github.com/mysteriumnetwork/go-openvpn/openvpn/tls"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market"
"github.com/mysteriumnetwork/node/nat"
openvpn_service "github.com/mysteriumnetwork/node/services/openvpn"
openvpn_discovery "github.com/mysteriumnetwork/node/services/openvpn/discovery"
openvpn_session "github.com/mysteriumnetwork/node/services/openvpn/session"
"github.com/mysteriumnetwork/node/session"
)
// NewManager creates new instance of Openvpn service
func NewManager(
nodeOptions node.Options,
serviceOptions Options,
ipResolver ip.Resolver,
locationResolver location.Resolver,
sessionMap openvpn_session.SessionMap,
) *Manager {
natService := nat.NewService()
sessionValidator := openvpn_session.NewValidator(sessionMap, identity.NewExtractor())
return &Manager{
locationResolver: locationResolver,
ipResolver: ipResolver,
natService: natService,
proposalFactory: newProposalFactory(serviceOptions),
sessionConfigNegotiatorFactory: newSessionConfigNegotiatorFactory(serviceOptions),
vpnServerConfigFactory: newServerConfigFactory(nodeOptions, serviceOptions),
vpnServerFactory: newServerFactory(nodeOptions, sessionValidator),
}
}
func newProposalFactory(serviceOptions Options) ProposalFactory {
return func(currentLocation market.Location) market.ServiceProposal {
return openvpn_discovery.NewServiceProposalWithLocation(currentLocation, serviceOptions.OpenvpnProtocol)
}
}
// newServerConfigFactory returns function generating server config and generates required security primitives
func newServerConfigFactory(nodeOptions node.Options, serviceOptions Options) ServerConfigFactory {
return func(secPrimitives *tls.Primitives) *openvpn_service.ServerConfig {
// TODO: check nodeOptions for --openvpn-transport option
return openvpn_service.NewServerConfig(
nodeOptions.Directories.Runtime,
nodeOptions.Directories.Config,
"10.8.0.0", "255.255.255.0",
secPrimitives,
serviceOptions.OpenvpnPort,
serviceOptions.OpenvpnProtocol,
)
}
}
func newServerFactory(nodeOptions node.Options, sessionValidator *openvpn_session.Validator) ServerFactory {
return func(config *openvpn_service.ServerConfig) openvpn.Process {
return openvpn.CreateNewProcess(
nodeOptions.Openvpn.BinaryPath(),
config.GenericConfig,
auth.NewMiddleware(sessionValidator.Validate, sessionValidator.Cleanup),
state.NewMiddleware(vpnStateCallback),
)
}
}
// newSessionConfigNegotiatorFactory returns function generating session config for remote client
func newSessionConfigNegotiatorFactory(serviceOptions Options) SessionConfigNegotiatorFactory {
return func(secPrimitives *tls.Primitives, outboundIP, publicIP string) session.ConfigNegotiator {
serverIP := vpnServerIP(serviceOptions, outboundIP, publicIP)
return &OpenvpnConfigNegotiator{
vpnConfig: openvpn_service.VPNConfig{
RemoteIP: serverIP,
RemotePort: serviceOptions.OpenvpnPort,
RemoteProtocol: serviceOptions.OpenvpnProtocol,
TLSPresharedKey: secPrimitives.PresharedKey.ToPEMFormat(),
CACertificate: secPrimitives.CertificateAuthority.ToPEMFormat(),
},
}
}
}
// OpenvpnConfigNegotiator knows how to send the openvpn config to the consumer
type OpenvpnConfigNegotiator struct {
vpnConfig openvpn_service.VPNConfig
}
// ConsumeConfig doesn't do anything on the openvpn side, since it's not required here
func (ocn *OpenvpnConfigNegotiator) ConsumeConfig(json.RawMessage) error {
return nil
}
// ProvideConfig returns the config for user
func (ocn *OpenvpnConfigNegotiator) ProvideConfig() (session.ServiceConfiguration, error) {
return &ocn.vpnConfig, nil
}
func vpnServerIP(serviceOptions Options, outboundIP, publicIP string) string {
//TODO public ip could be overridden by arg nodeOptions if needed
if publicIP != outboundIP {
log.Warnf(
`WARNING: It seems that publicly visible ip: [%s] does not match your local machine's ip: [%s].
You probably need to do port forwarding on your router: %s:%v -> %s:%v.`,
publicIP,
outboundIP,
publicIP,
serviceOptions.OpenvpnPort,
outboundIP,
serviceOptions.OpenvpnPort,
)
}
return publicIP
}
| 1 | 13,074 | I don't see anywhere where `ServiceConfiguration` interface implementation would return any kind of error. Not sure if its needed, but we could ALWAYS return a valid 'pseudo' configuration without possibility of error. | mysteriumnetwork-node | go |
@@ -36,4 +36,15 @@ public class CommonRenderingUtilTest {
assertThat(CommonRenderingUtil.stripQuotes("'a'bc'")).isEqualTo("'a'bc'");
assertThat(CommonRenderingUtil.stripQuotes("\"a\"bc\"")).isEqualTo("\"a\"bc\"");
}
+
+ @Test
+ public void testGetDocLines() {
+ // Check that we don't care which form of line break is used.
+ assertThat(CommonRenderingUtil.getDocLines("a\nb\nc")).containsExactly("a", "b", "c").inOrder();
+ assertThat(CommonRenderingUtil.getDocLines("a\rb\rc")).containsExactly("a", "b", "c").inOrder();
+ assertThat(CommonRenderingUtil.getDocLines("a\r\nb\r\nc"))
+ .containsExactly("a", "b", "c")
+ .inOrder();
+ assertThat(CommonRenderingUtil.getDocLines("")).isEmpty();
+ }
} | 1 | /* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import static com.google.common.truth.Truth.assertThat;
import org.junit.Test;
public class CommonRenderingUtilTest {
@Test
public void testStripQuotes() {
assertThat(CommonRenderingUtil.stripQuotes("\"abc\"")).isEqualTo("abc");
assertThat(CommonRenderingUtil.stripQuotes("'abc'")).isEqualTo("abc");
// Unbalanced quotes disables stripping.
assertThat(CommonRenderingUtil.stripQuotes("\"abc")).isEqualTo("\"abc");
assertThat(CommonRenderingUtil.stripQuotes("'abc")).isEqualTo("'abc");
assertThat(CommonRenderingUtil.stripQuotes("abc\"")).isEqualTo("abc\"");
assertThat(CommonRenderingUtil.stripQuotes("abc'")).isEqualTo("abc'");
// Having quote character inside also disables stripping.
// NOTE(pongad): I'm not sure why; keeping old behavior for now.
assertThat(CommonRenderingUtil.stripQuotes("'a'bc'")).isEqualTo("'a'bc'");
assertThat(CommonRenderingUtil.stripQuotes("\"a\"bc\"")).isEqualTo("\"a\"bc\"");
}
}
| 1 | 26,665 | Do we actually want to split on `\r`? I thought that the "newline sequence" was only `\r\n` on windows. | googleapis-gapic-generator | java |
@@ -273,10 +273,14 @@ void t_json_generator::write_type_spec(t_type* ttype) {
write_key_and_string("valueTypeId", get_type_name(vtype));
write_type_spec_object("keyType", ktype);
write_type_spec_object("valueType", vtype);
- } else if (ttype->is_list() || ttype->is_set()) {
+ } else if (ttype->is_list()) {
t_type* etype = ((t_list*)ttype)->get_elem_type();
write_key_and_string("elemTypeId", get_type_name(etype));
write_type_spec_object("elemType", etype);
+ } else if (ttype->is_set()) {
+ t_type* etype = ((t_set*)ttype)->get_elem_type();
+ write_key_and_string("elemTypeId", get_type_name(etype));
+ write_type_spec_object("elemType", etype);
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Contains some contributions under the Thrift Software License.
* Please see doc/old-thrift-license.txt in the Thrift distribution for
* details.
*/
#include <fstream>
#include <iostream>
#include <sstream>
#include <limits>
#include <stdlib.h>
#include <sys/stat.h>
#include "thrift/platform.h"
#include "thrift/generate/t_generator.h"
using std::map;
using std::ofstream;
using std::ostream;
using std::ostringstream;
using std::string;
using std::stringstream;
using std::vector;
using std::stack;
static const string endl = "\n";
static const string quot = "\"";
static const bool NO_INDENT = false;
static const bool FORCE_STRING = true;
class t_json_generator : public t_generator {
public:
t_json_generator(t_program* program,
const std::map<std::string, std::string>& parsed_options,
const std::string& option_string)
: t_generator(program) {
(void)option_string;
std::map<std::string, std::string>::const_iterator iter;
should_merge_includes_ = false;
for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) {
if( iter->first.compare("merge") == 0) {
should_merge_includes_ = true;
} else {
throw "unknown option json:" + iter->first;
}
}
out_dir_base_ = "gen-json";
}
virtual ~t_json_generator() {}
/**
* Init and close methods
*/
void init_generator();
void close_generator();
void generate_typedef(t_typedef* ttypedef);
void generate_enum(t_enum* tenum);
void generate_program();
void generate_function(t_function* tfunc);
void generate_field(t_field* field);
void generate_service(t_service* tservice);
void generate_struct(t_struct* tstruct);
private:
bool should_merge_includes_;
std::ofstream f_json_;
std::stack<bool> comma_needed_;
template <typename T>
string number_to_string(T t) {
std::ostringstream out;
out.imbue(std::locale::classic());
out.precision(std::numeric_limits<T>::digits10);
out << t;
return out.str();
}
template <typename T>
void write_number(T n) {
f_json_ << number_to_string(n);
}
string get_type_name(t_type* ttype);
string get_qualified_name(t_type* ttype);
void start_object(bool should_indent = true);
void start_array();
void end_object();
void end_array();
void write_comma_if_needed();
void indicate_comma_needed();
string escape_json_string(const string& input);
string json_str(const string& str);
void merge_includes(t_program*);
void generate_constant(t_const* con);
void write_type_spec_entry(const char* name, t_type* ttype);
void write_type_spec_object(const char* name, t_type* ttype);
void write_type_spec(t_type* ttype);
void write_string(const string& value);
void write_value(t_type* tvalue);
void write_const_value(t_const_value* value, bool force_string = false);
void write_key_and(string key);
void write_key_and_string(string key, string val);
void write_key_and_integer(string key, int val);
void write_key_and_bool(string key, bool val);
};
void t_json_generator::init_generator() {
MKDIR(get_out_dir().c_str());
string f_json_name = get_out_dir() + program_->get_name() + ".json";
f_json_.open(f_json_name.c_str());
// Merge all included programs into this one so we can output one big file.
if (should_merge_includes_) {
merge_includes(program_);
}
}
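// Escapes backslashes, quotes, slashes and control characters so the value can be emitted as a JSON string literal.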
string t_json_generator::escape_json_string(const string& input) {
std::ostringstream ss;
for (std::string::const_iterator iter = input.begin(); iter != input.end(); iter++) {
switch (*iter) {
case '\\':
ss << "\\\\";
break;
case '"':
ss << "\\\"";
break;
case '/':
ss << "\\/";
break;
case '\b':
ss << "\\b";
break;
case '\f':
ss << "\\f";
break;
case '\n':
ss << "\\n";
break;
case '\r':
ss << "\\r";
break;
case '\t':
ss << "\\t";
break;
default:
ss << *iter;
break;
}
}
return ss.str();
}
void t_json_generator::start_object(bool should_indent) {
f_json_ << (should_indent ? indent() : "") << "{" << endl;
indent_up();
comma_needed_.push(false);
}
void t_json_generator::start_array() {
f_json_ << "[" << endl;
indent_up();
comma_needed_.push(false);
}
void t_json_generator::write_comma_if_needed() {
if (comma_needed_.top()) {
f_json_ << "," << endl;
}
}
void t_json_generator::indicate_comma_needed() {
comma_needed_.pop();
comma_needed_.push(true);
}
void t_json_generator::write_key_and(string key) {
write_comma_if_needed();
indent(f_json_) << json_str(key) << ": ";
indicate_comma_needed();
}
void t_json_generator::write_key_and_integer(string key, int val) {
write_comma_if_needed();
indent(f_json_) << json_str(key) << ": " << number_to_string(val);
indicate_comma_needed();
}
void t_json_generator::write_key_and_string(string key, string val) {
write_comma_if_needed();
indent(f_json_) << json_str(key) << ": " << json_str(val);
indicate_comma_needed();
}
void t_json_generator::write_key_and_bool(string key, bool val) {
write_comma_if_needed();
indent(f_json_) << json_str(key) << ": " << (val ? "true" : "false");
indicate_comma_needed();
}
void t_json_generator::end_object() {
indent_down();
f_json_ << endl << indent() << "}";
comma_needed_.pop();
}
void t_json_generator::end_array() {
indent_down();
if (comma_needed_.top()) {
f_json_ << endl;
}
indent(f_json_) << "]";
comma_needed_.pop();
}
void t_json_generator::write_type_spec_object(const char* name, t_type* ttype) {
ttype = ttype->get_true_type();
if (ttype->is_struct() || ttype->is_xception() || ttype->is_container()) {
write_key_and(name);
start_object(NO_INDENT);
write_key_and("typeId");
write_type_spec(ttype);
end_object();
}
}
void t_json_generator::write_type_spec_entry(const char* name, t_type* ttype) {
write_key_and(name);
write_type_spec(ttype);
}
void t_json_generator::write_type_spec(t_type* ttype) {
ttype = ttype->get_true_type();
write_string(get_type_name(ttype));
if (ttype->is_struct() || ttype->is_xception()) {
write_key_and_string("class", get_qualified_name(ttype));
} else if (ttype->is_map()) {
t_type* ktype = ((t_map*)ttype)->get_key_type();
t_type* vtype = ((t_map*)ttype)->get_val_type();
write_key_and_string("keyTypeId", get_type_name(ktype));
write_key_and_string("valueTypeId", get_type_name(vtype));
write_type_spec_object("keyType", ktype);
write_type_spec_object("valueType", vtype);
} else if (ttype->is_list() || ttype->is_set()) {
t_type* etype = ((t_list*)ttype)->get_elem_type();
write_key_and_string("elemTypeId", get_type_name(etype));
write_type_spec_object("elemType", etype);
}
}
void t_json_generator::close_generator() {
f_json_ << endl;
f_json_.close();
}
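// Recursively copies enums, typedefs, structs, constants and services from all included programs into the given program.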
void t_json_generator::merge_includes(t_program* program) {
vector<t_program*> includes = program->get_includes();
vector<t_program*>::iterator inc_iter;
for (inc_iter = includes.begin(); inc_iter != includes.end(); ++inc_iter) {
t_program* include = *inc_iter;
// recurse in case we get crazy
merge_includes(include);
// merge enums
vector<t_enum*> enums = include->get_enums();
vector<t_enum*>::iterator en_iter;
for (en_iter = enums.begin(); en_iter != enums.end(); ++en_iter) {
program->add_enum(*en_iter);
}
// merge typedefs
vector<t_typedef*> typedefs = include->get_typedefs();
vector<t_typedef*>::iterator td_iter;
for (td_iter = typedefs.begin(); td_iter != typedefs.end(); ++td_iter) {
program->add_typedef(*td_iter);
}
// merge structs
vector<t_struct*> objects = include->get_objects();
vector<t_struct*>::iterator o_iter;
for (o_iter = objects.begin(); o_iter != objects.end(); ++o_iter) {
program->add_struct(*o_iter);
}
// merge constants
vector<t_const*> consts = include->get_consts();
vector<t_const*>::iterator c_iter;
for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) {
program->add_const(*c_iter);
}
// merge services
vector<t_service*> services = include->get_services();
vector<t_service*>::iterator sv_iter;
for (sv_iter = services.begin(); sv_iter != services.end(); ++sv_iter) {
program->add_service(*sv_iter);
}
}
}
void t_json_generator::generate_program() {
init_generator();
start_object();
write_key_and_string("name", program_->get_name());
if (program_->has_doc()) {
write_key_and_string("doc", program_->get_doc());
}
// When merging includes, the "namespaces" and "includes" sections
// become ambiguous, so just skip them.
if (!should_merge_includes_) {
// Generate namespaces
write_key_and("namespaces");
start_object(NO_INDENT);
const map<string, string>& namespaces = program_->get_namespaces();
map<string, string>::const_iterator ns_it;
for (ns_it = namespaces.begin(); ns_it != namespaces.end(); ++ns_it) {
write_key_and_string(ns_it->first, ns_it->second);
indicate_comma_needed();
}
end_object();
// Generate includes
write_key_and("includes");
start_array();
const vector<t_program*> includes = program_->get_includes();
vector<t_program*>::const_iterator inc_it;
for (inc_it = includes.begin(); inc_it != includes.end(); ++inc_it) {
write_comma_if_needed();
write_string((*inc_it)->get_name());
indicate_comma_needed();
}
end_array();
}
// Generate enums
write_key_and("enums");
start_array();
vector<t_enum*> enums = program_->get_enums();
vector<t_enum*>::iterator en_iter;
for (en_iter = enums.begin(); en_iter != enums.end(); ++en_iter) {
write_comma_if_needed();
generate_enum(*en_iter);
indicate_comma_needed();
}
end_array();
// Generate typedefs
write_key_and("typedefs");
start_array();
vector<t_typedef*> typedefs = program_->get_typedefs();
vector<t_typedef*>::iterator td_iter;
for (td_iter = typedefs.begin(); td_iter != typedefs.end(); ++td_iter) {
write_comma_if_needed();
generate_typedef(*td_iter);
indicate_comma_needed();
}
end_array();
// Generate structs, exceptions, and unions in declared order
write_key_and("structs");
start_array();
vector<t_struct*> objects = program_->get_objects();
vector<t_struct*>::iterator o_iter;
for (o_iter = objects.begin(); o_iter != objects.end(); ++o_iter) {
write_comma_if_needed();
if ((*o_iter)->is_xception()) {
generate_xception(*o_iter);
} else {
generate_struct(*o_iter);
}
indicate_comma_needed();
}
end_array();
// Generate constants
write_key_and("constants");
start_array();
vector<t_const*> consts = program_->get_consts();
vector<t_const*>::iterator c_iter;
for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) {
write_comma_if_needed();
generate_constant(*c_iter);
indicate_comma_needed();
}
end_array();
// Generate services
write_key_and("services");
start_array();
vector<t_service*> services = program_->get_services();
vector<t_service*>::iterator sv_iter;
for (sv_iter = services.begin(); sv_iter != services.end(); ++sv_iter) {
write_comma_if_needed();
generate_service(*sv_iter);
indicate_comma_needed();
}
end_array();
end_object();
// Close the generator
close_generator();
}
void t_json_generator::generate_typedef(t_typedef* ttypedef) {
start_object();
write_key_and_string("name", get_qualified_name(ttypedef));
write_key_and_string("typeId", get_type_name(ttypedef->get_true_type()));
write_type_spec_object("type", ttypedef->get_true_type());
if (ttypedef->has_doc()) {
write_key_and_string("doc", ttypedef->get_doc());
}
end_object();
}
void t_json_generator::write_string(const string& value) {
f_json_ << quot << escape_json_string(value) << quot;
}
void t_json_generator::write_const_value(t_const_value* value, bool should_force_string) {
switch (value->get_type()) {
case t_const_value::CV_IDENTIFIER:
case t_const_value::CV_INTEGER:
if (should_force_string) {
write_string(number_to_string(value->get_integer()));
} else {
write_number(value->get_integer());
}
break;
case t_const_value::CV_DOUBLE:
if (should_force_string) {
write_string(number_to_string(value->get_double()));
} else {
write_number(value->get_double());
}
break;
case t_const_value::CV_STRING:
write_string(value->get_string());
break;
case t_const_value::CV_LIST: {
start_array();
std::vector<t_const_value*> list = value->get_list();
std::vector<t_const_value*>::iterator lit;
for (lit = list.begin(); lit != list.end(); ++lit) {
write_comma_if_needed();
f_json_ << indent();
write_const_value(*lit);
indicate_comma_needed();
}
end_array();
break;
}
case t_const_value::CV_MAP: {
start_object(NO_INDENT);
std::map<t_const_value*, t_const_value*> map = value->get_map();
std::map<t_const_value*, t_const_value*>::iterator mit;
for (mit = map.begin(); mit != map.end(); ++mit) {
write_comma_if_needed();
f_json_ << indent();
// JSON objects only allow string keys
write_const_value(mit->first, FORCE_STRING);
f_json_ << ": ";
write_const_value(mit->second);
indicate_comma_needed();
}
end_object();
break;
}
default:
f_json_ << "null";
break;
}
}
string t_json_generator::json_str(const string& str) {
return quot + escape_json_string(str) + quot;
}
void t_json_generator::generate_constant(t_const* con) {
start_object();
write_key_and_string("name", con->get_name());
write_key_and_string("typeId", get_type_name(con->get_type()));
write_type_spec_object("type", con->get_type());
if (con->has_doc()) {
write_key_and_string("doc", con->get_doc());
}
write_key_and("value");
write_const_value(con->get_value());
end_object();
}
void t_json_generator::generate_enum(t_enum* tenum) {
start_object();
write_key_and_string("name", tenum->get_name());
if (tenum->has_doc()) {
write_key_and_string("doc", tenum->get_doc());
}
write_key_and("members");
start_array();
vector<t_enum_value*> values = tenum->get_constants();
vector<t_enum_value*>::iterator val_iter;
for (val_iter = values.begin(); val_iter != values.end(); ++val_iter) {
write_comma_if_needed();
t_enum_value* val = (*val_iter);
start_object();
write_key_and_string("name", val->get_name());
write_key_and_integer("value", val->get_value());
if (val->has_doc()) {
write_key_and_string("doc", val->get_doc());
}
end_object();
indicate_comma_needed();
}
end_array();
end_object();
}
void t_json_generator::generate_struct(t_struct* tstruct) {
start_object();
write_key_and_string("name", tstruct->get_name());
if (tstruct->has_doc()) {
write_key_and_string("doc", tstruct->get_doc());
}
write_key_and_bool("isException", tstruct->is_xception());
write_key_and_bool("isUnion", tstruct->is_union());
write_key_and("fields");
start_array();
vector<t_field*> members = tstruct->get_members();
vector<t_field*>::iterator mem_iter;
for (mem_iter = members.begin(); mem_iter != members.end(); mem_iter++) {
write_comma_if_needed();
generate_field(*mem_iter);
indicate_comma_needed();
}
end_array();
end_object();
}
void t_json_generator::generate_service(t_service* tservice) {
start_object();
write_key_and_string("name", get_qualified_name(tservice));
if (tservice->get_extends()) {
write_key_and_string("extends", get_qualified_name(tservice->get_extends()));
}
if (tservice->has_doc()) {
write_key_and_string("doc", tservice->get_doc());
}
write_key_and("functions");
start_array();
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::iterator fn_iter = functions.begin();
for (; fn_iter != functions.end(); fn_iter++) {
write_comma_if_needed();
generate_function(*fn_iter);
indicate_comma_needed();
}
end_array();
end_object();
}
void t_json_generator::generate_function(t_function* tfunc) {
start_object();
write_key_and_string("name", tfunc->get_name());
write_key_and_string("returnTypeId", get_type_name(tfunc->get_returntype()));
write_type_spec_object("returnType", tfunc->get_returntype());
write_key_and_bool("oneway", tfunc->is_oneway());
if (tfunc->has_doc()) {
write_key_and_string("doc", tfunc->get_doc());
}
write_key_and("arguments");
start_array();
vector<t_field*> members = tfunc->get_arglist()->get_members();
vector<t_field*>::iterator mem_iter = members.begin();
for (; mem_iter != members.end(); mem_iter++) {
write_comma_if_needed();
generate_field(*mem_iter);
indicate_comma_needed();
}
end_array();
write_key_and("exceptions");
start_array();
vector<t_field*> excepts = tfunc->get_xceptions()->get_members();
vector<t_field*>::iterator ex_iter = excepts.begin();
for (; ex_iter != excepts.end(); ex_iter++) {
write_comma_if_needed();
generate_field(*ex_iter);
indicate_comma_needed();
}
end_array();
end_object();
}
void t_json_generator::generate_field(t_field* field) {
start_object();
write_key_and_integer("key", field->get_key());
write_key_and_string("name", field->get_name());
write_key_and_string("typeId", get_type_name(field->get_type()));
write_type_spec_object("type", field->get_type());
if (field->has_doc()) {
write_key_and_string("doc", field->get_doc());
}
write_key_and("required");
switch (field->get_req()) {
case t_field::T_REQUIRED:
write_string("required");
break;
case t_field::T_OPT_IN_REQ_OUT:
write_string("req_out");
break;
default:
write_string("optional");
break;
}
if (field->get_value()) {
write_key_and("default");
write_const_value(field->get_value());
}
end_object();
}
string t_json_generator::get_type_name(t_type* ttype) {
ttype = ttype->get_true_type();
if (ttype->is_list()) {
return "list";
}
if (ttype->is_set()) {
return "set";
}
if (ttype->is_map()) {
return "map";
}
if (ttype->is_enum()) {
return "i32";
}
if (ttype->is_struct()) {
return ((t_struct*)ttype)->is_union() ? "union" : "struct";
}
if (ttype->is_xception()) {
return "exception";
}
if (ttype->is_base_type()) {
t_base_type* tbasetype = (t_base_type*)ttype;
return tbasetype->is_binary() ? "binary" : t_base_type::t_base_name(tbasetype->get_base());
}
return "(unknown)";
}
string t_json_generator::get_qualified_name(t_type* ttype) {
if (should_merge_includes_ || ttype->get_program() == program_) {
return ttype->get_name();
}
return ttype->get_program()->get_name() + "." + ttype->get_name();
}
THRIFT_REGISTER_GENERATOR(json,
"JSON",
" merge: Generate output with included files merged\n")
| 1 | 13,014 | Nit: these lines are duplicated from above. They can be refactored by setting etype in a conditional and putting 282 and 283 below that. | apache-thrift | c |
@@ -49,7 +49,7 @@ func (s *Server) handleSignals() {
s.Debugf("Trapped %q signal", sig)
switch sig {
case syscall.SIGINT:
- s.Noticef("Server Exiting..")
+ s.Shutdown()
os.Exit(0)
case syscall.SIGUSR1:
// File log re-open for rotating file logs. | 1 | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package server
import (
"errors"
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"syscall"
)
var processName = "nats-server"
// SetProcessName allows to change the expected name of the process.
func SetProcessName(name string) {
processName = name
}
// Signal Handling
func (s *Server) handleSignals() {
if s.getOpts().NoSigs {
return
}
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGUSR1, syscall.SIGUSR2, syscall.SIGHUP)
go func() {
for {
select {
case sig := <-c:
s.Debugf("Trapped %q signal", sig)
switch sig {
case syscall.SIGINT:
s.Noticef("Server Exiting..")
os.Exit(0)
case syscall.SIGUSR1:
// File log re-open for rotating file logs.
s.ReOpenLogFile()
case syscall.SIGUSR2:
go s.lameDuckMode()
case syscall.SIGHUP:
// Config reload.
if err := s.Reload(); err != nil {
s.Errorf("Failed to reload server configuration: %s", err)
}
}
case <-s.quitCh:
return
}
}
}()
}
// ProcessSignal sends the given signal command to the given process. If pidStr
// is empty, this will send the signal to the single running instance of
// nats-server. If multiple instances are running, it returns an error. This returns
// an error if the given process is not running or the command is invalid.
func ProcessSignal(command Command, pidStr string) error {
var pid int
if pidStr == "" {
pids, err := resolvePids()
if err != nil {
return err
}
if len(pids) == 0 {
return fmt.Errorf("no %s processes running", processName)
}
if len(pids) > 1 {
errStr := fmt.Sprintf("multiple %s processes running:\n", processName)
prefix := ""
for _, p := range pids {
errStr += fmt.Sprintf("%s%d", prefix, p)
prefix = "\n"
}
return errors.New(errStr)
}
pid = pids[0]
} else {
p, err := strconv.Atoi(pidStr)
if err != nil {
return fmt.Errorf("invalid pid: %s", pidStr)
}
pid = p
}
var err error
switch command {
case CommandStop:
err = kill(pid, syscall.SIGKILL)
case CommandQuit:
err = kill(pid, syscall.SIGINT)
case CommandReopen:
err = kill(pid, syscall.SIGUSR1)
case CommandReload:
err = kill(pid, syscall.SIGHUP)
case commandLDMode:
err = kill(pid, syscall.SIGUSR2)
default:
err = fmt.Errorf("unknown signal %q", command)
}
return err
}
// resolvePids returns the pids for all running nats-server processes.
func resolvePids() ([]int, error) {
// If pgrep isn't available, this will just bail out and the user will be
// required to specify a pid.
output, err := pgrep()
if err != nil {
switch err.(type) {
case *exec.ExitError:
// ExitError indicates non-zero exit code, meaning no processes
// found.
break
default:
return nil, errors.New("unable to resolve pid, try providing one")
}
}
var (
myPid = os.Getpid()
pidStrs = strings.Split(string(output), "\n")
pids = make([]int, 0, len(pidStrs))
)
for _, pidStr := range pidStrs {
if pidStr == "" {
continue
}
pid, err := strconv.Atoi(pidStr)
if err != nil {
return nil, errors.New("unable to resolve pid, try providing one")
}
// Ignore the current process.
if pid == myPid {
continue
}
pids = append(pids, pid)
}
return pids, nil
}
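// kill sends the given signal to the process with the given pid.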
var kill = func(pid int, signal syscall.Signal) error {
return syscall.Kill(pid, signal)
}
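// pgrep returns the output of running "pgrep <processName>".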
var pgrep = func() ([]byte, error) {
return exec.Command("pgrep", processName).Output()
}
| 1 | 9,667 | I would do an s.Notice("Initiating Shutdown...") then after Shutdown() do the original Server Exiting. | nats-io-nats-server | go |
@@ -57,6 +57,11 @@ namespace Nethermind.DataMarketplace.Consumers.Refunds.Services
ulong now = _timestamper.UnixTime.Seconds;
if (!deposit.CanClaimRefund(now))
{
+ var timeLeftToClaimRefund = deposit.GetTimeLeftToClaimRefund(now);
+ if (timeLeftToClaimRefund > 0)
+ {
+                    if (_logger.IsInfo) _logger.Info($"Time left to claim a refund: {timeLeftToClaimRefund} seconds.");
+ }
return RefundClaimStatus.Empty;
}
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.DataMarketplace.Consumers.Deposits.Domain;
using Nethermind.DataMarketplace.Consumers.Deposits.Repositories;
using Nethermind.DataMarketplace.Consumers.Shared.Services.Models;
using Nethermind.DataMarketplace.Core.Domain;
using Nethermind.DataMarketplace.Core.Services;
using Nethermind.Int256;
using Nethermind.Logging;
namespace Nethermind.DataMarketplace.Consumers.Refunds.Services
{
public class RefundClaimant : IRefundClaimant
{
private readonly IRefundService _refundService;
private readonly INdmBlockchainBridge _blockchainBridge;
private readonly IDepositDetailsRepository _depositRepository;
private readonly ITransactionVerifier _transactionVerifier;
private readonly IGasPriceService _gasPriceService;
private readonly ITimestamper _timestamper;
private readonly ILogger _logger;
public RefundClaimant(IRefundService refundService, INdmBlockchainBridge blockchainBridge,
IDepositDetailsRepository depositRepository, ITransactionVerifier transactionVerifier,
IGasPriceService gasPriceService, ITimestamper timestamper, ILogManager logManager)
{
_refundService = refundService;
_blockchainBridge = blockchainBridge;
_depositRepository = depositRepository;
_transactionVerifier = transactionVerifier;
_gasPriceService = gasPriceService;
_timestamper = timestamper;
_logger = logManager.GetClassLogger();
}
public async Task<RefundClaimStatus> TryClaimRefundAsync(DepositDetails deposit, Address refundTo)
{
ulong now = _timestamper.UnixTime.Seconds;
if (!deposit.CanClaimRefund(now))
{
return RefundClaimStatus.Empty;
}
Block? latestBlock = await _blockchainBridge.GetLatestBlockAsync();
if (latestBlock == null)
{
return RefundClaimStatus.Empty;
}
now = (ulong) latestBlock.Timestamp;
if (!deposit.CanClaimRefund(now))
{
return RefundClaimStatus.Empty;
}
Keccak depositId = deposit.Deposit.Id;
Keccak? transactionHash = deposit.ClaimedRefundTransaction?.Hash;
if (transactionHash is null)
{
Address provider = deposit.DataAsset.Provider.Address;
RefundClaim refundClaim = new RefundClaim(depositId, deposit.DataAsset.Id, deposit.Deposit.Units,
deposit.Deposit.Value, deposit.Deposit.ExpiryTime, deposit.Pepper, provider, refundTo);
UInt256 gasPrice = await _gasPriceService.GetCurrentRefundGasPriceAsync();
transactionHash = await _refundService.ClaimRefundAsync(refundTo, refundClaim, gasPrice);
if (transactionHash is null)
{
if (_logger.IsError) _logger.Error("There was an error when trying to claim refund (no transaction hash returned).");
return RefundClaimStatus.Empty;
}
deposit.AddClaimedRefundTransaction(TransactionInfo.Default(transactionHash, 0, gasPrice,
_refundService.GasLimit, _timestamper.UnixTime.Seconds));
await _depositRepository.UpdateAsync(deposit);
if (_logger.IsInfo) _logger.Info($"Claimed a refund for deposit: '{depositId}', gas price: {gasPrice} wei, transaction hash: '{transactionHash}' (awaits a confirmation).");
}
bool confirmed = await TryConfirmClaimAsync(deposit, string.Empty);
return confirmed
? RefundClaimStatus.Confirmed(transactionHash)
: RefundClaimStatus.Unconfirmed(transactionHash);
}
public async Task<RefundClaimStatus> TryClaimEarlyRefundAsync(DepositDetails deposit, Address refundTo)
{
ulong now = _timestamper.UnixTime.Seconds;
if (!deposit.CanClaimEarlyRefund(now, deposit.Timestamp))
{
return RefundClaimStatus.Empty;
}
Block? latestBlock = await _blockchainBridge.GetLatestBlockAsync();
if (latestBlock == null)
{
return RefundClaimStatus.Empty;
}
now = (ulong) latestBlock.Timestamp;
if (!deposit.CanClaimEarlyRefund(now, deposit.Timestamp))
{
return RefundClaimStatus.Empty;
}
Keccak depositId = deposit.Deposit.Id;
Keccak? transactionHash = deposit.ClaimedRefundTransaction?.Hash;
if (transactionHash is null)
{
Address provider = deposit.DataAsset.Provider.Address;
if (deposit.EarlyRefundTicket == null)
{
throw new InvalidDataException($"Early refund ticket is null on a claimable deposit {depositId}");
}
EarlyRefundTicket ticket = deposit.EarlyRefundTicket;
EarlyRefundClaim earlyRefundClaim = new EarlyRefundClaim(ticket.DepositId, deposit.DataAsset.Id,
deposit.Deposit.Units, deposit.Deposit.Value, deposit.Deposit.ExpiryTime, deposit.Pepper, provider,
ticket.ClaimableAfter, ticket.Signature, refundTo);
UInt256 gasPrice = await _gasPriceService.GetCurrentRefundGasPriceAsync();
transactionHash = await _refundService.ClaimEarlyRefundAsync(refundTo, earlyRefundClaim, gasPrice);
if (transactionHash is null)
{
if (_logger.IsError) _logger.Error("There was an error when trying to claim early refund (no transaction hash returned).");
return RefundClaimStatus.Empty;
}
deposit.AddClaimedRefundTransaction(TransactionInfo.Default(transactionHash, 0, gasPrice,
_refundService.GasLimit, _timestamper.UnixTime.Seconds));
await _depositRepository.UpdateAsync(deposit);
if (_logger.IsInfo) _logger.Info($"Claimed an early refund for deposit: '{depositId}', gas price: {gasPrice} wei, transaction hash: '{transactionHash}' (awaits a confirmation).");
}
bool confirmed = await TryConfirmClaimAsync(deposit, "early ");
return confirmed
? RefundClaimStatus.Confirmed(transactionHash)
: RefundClaimStatus.Unconfirmed(transactionHash);
}
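        // Tries to confirm a pending or already included refund claim transaction for the deposit; returns true once the claim is confirmed on chain.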
private async Task<bool> TryConfirmClaimAsync(DepositDetails deposit, string type)
{
string claimType = $"{type}refund";
Keccak depositId = deposit.Id;
NdmTransaction? transactionDetails = null;
TransactionInfo includedTransaction = deposit.Transactions.SingleOrDefault(t => t.State == TransactionState.Included);
IOrderedEnumerable<TransactionInfo> pendingTransactions = deposit.Transactions
.Where(t => t.State == TransactionState.Pending)
.OrderBy(t => t.Timestamp);
if (_logger.IsInfo) _logger.Info($"Deposit: '{deposit.Id}' refund claim pending transactions: {string.Join(", ", pendingTransactions.Select(t => $"{t.Hash} [{t.Type}]"))}");
if (includedTransaction is null)
{
foreach (TransactionInfo transaction in pendingTransactions)
{
Keccak? transactionHash = transaction.Hash;
if (transactionHash is null)
{
if (_logger.IsInfo) _logger.Info($"Transaction was not found for hash: '{null}' for deposit: '{depositId}' to claim the {claimType}.");
return false;
}
transactionDetails = await _blockchainBridge.GetTransactionAsync(transactionHash);
if (transactionDetails is null)
{
if (_logger.IsInfo) _logger.Info($"Transaction was not found for hash: '{transactionHash}' for deposit: '{depositId}' to claim the {claimType}.");
return false;
}
if (transactionDetails.IsPending)
{
if (_logger.IsInfo) _logger.Info($"Transaction with hash: '{transactionHash}' for deposit: '{deposit.Id}' {claimType} claim is still pending.");
return false;
}
deposit.SetIncludedClaimedRefundTransaction(transactionHash);
if (_logger.IsInfo) _logger.Info($"Transaction with hash: '{transactionHash}', type: '{transaction.Type}' for deposit: '{deposit.Id}' {claimType} claim was included into block: {transactionDetails.BlockNumber}.");
await _depositRepository.UpdateAsync(deposit);
includedTransaction = transaction;
break;
}
}
else if (includedTransaction.Type == TransactionType.Cancellation)
{
return false;
}
else
{
transactionDetails = includedTransaction.Hash == null ? null : await _blockchainBridge.GetTransactionAsync(includedTransaction.Hash);
if (transactionDetails is null)
{
if (_logger.IsWarn) _logger.Warn($"Transaction (set as included) was not found for hash: '{includedTransaction.Hash}' for deposit: '{deposit.Id}' {claimType} claim.");
return false;
}
}
if (includedTransaction is null)
{
return false;
}
if (_logger.IsInfo) _logger.Info($"Trying to claim the {claimType} (transaction hash: '{includedTransaction.Hash}') for deposit: '{depositId}'.");
TransactionVerifierResult verifierResult = await _transactionVerifier.VerifyAsync(transactionDetails!);
if (!verifierResult.BlockFound)
{
                if (_logger.IsWarn) _logger.Warn($"Block number: {transactionDetails!.BlockNumber}, hash: '{transactionDetails.BlockHash}' was not found for transaction hash: '{includedTransaction.Hash}' - {claimType} claim for deposit: '{depositId}' will not be confirmed.");
return false;
}
if (_logger.IsInfo) _logger.Info($"The {claimType} claim (transaction hash: '{includedTransaction.Hash}') for deposit: '{depositId}' has {verifierResult.Confirmations} confirmations (required at least {verifierResult.RequiredConfirmations}).");
if (!verifierResult.Confirmed)
{
return false;
}
deposit.SetRefundClaimed();
await _depositRepository.UpdateAsync(deposit);
if (_logger.IsInfo) _logger.Info($"The {claimType} claim (transaction hash: '{includedTransaction.Hash}') for deposit: '{depositId}' has been confirmed.");
return true;
}
}
}
| 1 | 24,878 | Not sure with how many seconds on average are we dealing with but maybe it's better to have hh:mm:ss format in logs - you can make it with `TimeSpan.FromSeconds(seconds).ToString()` | NethermindEth-nethermind | .cs |
@@ -257,6 +257,12 @@ func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec
if aws.Region == "" {
allErrs = append(allErrs, field.Required(awsPath.Child("region"), "must specify AWS region"))
}
+ for i, mp := range newObject.Spec.Compute {
+ computePath := specPath.Child("compute").Index(i)
+ if mp.Platform.AWS == nil {
+ allErrs = append(allErrs, field.Required(computePath.Child("aws"), "must specify platform for compute machine sets"))
+ }
+ }
}
if newObject.Spec.Platform.Azure != nil {
numberOfPlatforms++ | 1 | package validatingwebhooks
import (
"bufio"
"encoding/json"
"fmt"
"net/http"
"os"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/rest"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
)
const (
clusterDeploymentGroup = "hive.openshift.io"
clusterDeploymentVersion = "v1alpha1"
clusterDeploymentResource = "clusterdeployments"
clusterDeploymentAdmissionGroup = "admission.hive.openshift.io"
clusterDeploymentAdmissionVersion = "v1alpha1"
clusterDeploymentAdmissionResource = "clusterdeployments"
// ManagedDomainsFileEnvVar if present, points to a simple text
// file that includes a valid managed domain per line. Cluster deployments
// requesting that their domains be managed must have a base domain
// that is a direct child of one of the valid domains.
ManagedDomainsFileEnvVar = "MANAGED_DOMAINS_FILE"
)
var (
mutableFields = []string{"CertificateBundles", "Compute", "ControlPlaneConfig", "Ingress", "Installed", "PreserveOnDelete"}
)
// ClusterDeploymentValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
type ClusterDeploymentValidatingAdmissionHook struct {
validManagedDomains []string
}
// NewClusterDeploymentValidatingAdmissionHook constructs a new ClusterDeploymentValidatingAdmissionHook
func NewClusterDeploymentValidatingAdmissionHook() *ClusterDeploymentValidatingAdmissionHook {
managedDomainsFile := os.Getenv(ManagedDomainsFileEnvVar)
logger := log.WithField("validating_webhook", "clusterdeployment")
webhook := &ClusterDeploymentValidatingAdmissionHook{}
if len(managedDomainsFile) == 0 {
logger.Debug("No managed domains file specified")
return webhook
}
logger.WithField("file", managedDomainsFile).Debug("Managed domains file specified")
var err error
webhook.validManagedDomains, err = readManagedDomainsFile(managedDomainsFile)
if err != nil {
logger.WithError(err).WithField("file", managedDomainsFile).Fatal("Unable to read managedDomains file")
}
return webhook
}
// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the
// webhook is accessed by the kube apiserver.
// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1alpha1/clusterdeployments".
// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below.
func (a *ClusterDeploymentValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) {
log.WithFields(log.Fields{
"group": clusterDeploymentAdmissionGroup,
"version": clusterDeploymentAdmissionVersion,
"resource": clusterDeploymentAdmissionResource,
}).Info("Registering validation REST resource")
// NOTE: This GVR is meant to be different than the ClusterDeployment CRD GVR which has group "hive.openshift.io".
return schema.GroupVersionResource{
Group: clusterDeploymentAdmissionGroup,
Version: clusterDeploymentAdmissionVersion,
Resource: clusterDeploymentAdmissionResource,
},
"clusterdeployment"
}
// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs.
func (a *ClusterDeploymentValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
log.WithFields(log.Fields{
"group": clusterDeploymentAdmissionGroup,
"version": clusterDeploymentAdmissionVersion,
"resource": clusterDeploymentAdmissionResource,
}).Info("Initializing validation REST resource")
return nil // No initialization needed right now.
}
// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request.
// Usually it's the kube apiserver that is making the admission validation request.
func (a *ClusterDeploymentValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
contextLogger := log.WithFields(log.Fields{
"operation": admissionSpec.Operation,
"group": admissionSpec.Resource.Group,
"version": admissionSpec.Resource.Version,
"resource": admissionSpec.Resource.Resource,
"method": "Validate",
})
if !a.shouldValidate(admissionSpec) {
contextLogger.Info("Skipping validation for request")
// The request object isn't something that this validator should validate.
// Therefore, we say that it's Allowed.
return &admissionv1beta1.AdmissionResponse{
Allowed: true,
}
}
contextLogger.Info("Validating request")
if admissionSpec.Operation == admissionv1beta1.Create {
return a.validateCreate(admissionSpec)
}
if admissionSpec.Operation == admissionv1beta1.Update {
return a.validateUpdate(admissionSpec)
}
// We're only validating creates and updates at this time, so all other operations are explicitly allowed.
contextLogger.Info("Successful validation")
return &admissionv1beta1.AdmissionResponse{
Allowed: true,
}
}
// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check
// the validity of some other type of object with a different GVR.
func (a *ClusterDeploymentValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool {
contextLogger := log.WithFields(log.Fields{
"operation": admissionSpec.Operation,
"group": admissionSpec.Resource.Group,
"version": admissionSpec.Resource.Version,
"resource": admissionSpec.Resource.Resource,
"method": "shouldValidate",
})
if admissionSpec.Resource.Group != clusterDeploymentGroup {
contextLogger.Debug("Returning False, not our group")
return false
}
if admissionSpec.Resource.Version != clusterDeploymentVersion {
contextLogger.Debug("Returning False, it's our group, but not the right version")
return false
}
if admissionSpec.Resource.Resource != clusterDeploymentResource {
contextLogger.Debug("Returning False, it's our group and version, but not the right resource")
return false
}
// If we get here, then we're supposed to validate the object.
contextLogger.Debug("Returning True, passed all prerequisites.")
return true
}
// validateCreate specifically validates create operations for ClusterDeployment objects.
func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
contextLogger := log.WithFields(log.Fields{
"operation": admissionSpec.Operation,
"group": admissionSpec.Resource.Group,
"version": admissionSpec.Resource.Version,
"resource": admissionSpec.Resource.Resource,
"method": "validateCreate",
})
newObject := &hivev1.ClusterDeployment{}
err := json.Unmarshal(admissionSpec.Object.Raw, newObject)
if err != nil {
contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: err.Error(),
},
}
}
// Add the new data to the contextLogger
contextLogger.Data["object.Name"] = newObject.Name
// TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd)
if len(newObject.Name) > validation.DNS1123LabelMaxLength {
message := fmt.Sprintf("Invalid cluster deployment name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength))
contextLogger.Error(message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
if len(newObject.Spec.ClusterName) > validation.DNS1123LabelMaxLength {
message := fmt.Sprintf("Invalid cluster name (.spec.clusterName): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength))
contextLogger.Error(message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
// validate the ingress
if ingressValidationResult := validateIngress(newObject, contextLogger); ingressValidationResult != nil {
return ingressValidationResult
}
if newObject.Spec.ManageDNS {
if !validateDomain(newObject.Spec.BaseDomain, a.validManagedDomains) {
message := "The base domain must be a child of one of the managed domains for ClusterDeployments with manageDNS set to true"
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
}
allErrs := field.ErrorList{}
specPath := field.NewPath("spec")
if newObject.Spec.SSHKey.Name == "" {
allErrs = append(allErrs, field.Required(specPath.Child("sshKey", "name"), "must specify an SSH key to use"))
}
platformPath := specPath.Child("platform")
platformSecretsPath := specPath.Child("platformSecrets")
numberOfPlatforms := 0
canManageDNS := false
if newObject.Spec.Platform.AWS != nil {
numberOfPlatforms++
canManageDNS = true
if newObject.Spec.PlatformSecrets.AWS == nil {
allErrs = append(allErrs, field.Required(platformSecretsPath.Child("aws"), "must specify secrets for AWS access"))
}
aws := newObject.Spec.Platform.AWS
awsPath := platformPath.Child("aws")
if aws.Region == "" {
allErrs = append(allErrs, field.Required(awsPath.Child("region"), "must specify AWS region"))
}
}
if newObject.Spec.Platform.Azure != nil {
numberOfPlatforms++
if newObject.Spec.PlatformSecrets.Azure == nil {
allErrs = append(allErrs, field.Required(platformSecretsPath.Child("azure"), "must specify secrets for Azure access"))
}
azure := newObject.Spec.Platform.Azure
azurePath := platformPath.Child("azure")
if azure.Region == "" {
allErrs = append(allErrs, field.Required(azurePath.Child("region"), "must specify Azure region"))
}
if azure.BaseDomainResourceGroupName == "" {
allErrs = append(allErrs, field.Required(azurePath.Child("baseDomainResourceGroupName"), "must specify the Azure resource group for the base domain"))
}
}
if newObject.Spec.Platform.GCP != nil {
numberOfPlatforms++
if newObject.Spec.PlatformSecrets.GCP == nil {
allErrs = append(allErrs, field.Required(platformSecretsPath.Child("gcp"), "must specify secrets for GCP access"))
}
gcp := newObject.Spec.Platform.GCP
gcpPath := platformPath.Child("gcp")
if gcp.ProjectID == "" {
allErrs = append(allErrs, field.Required(gcpPath.Child("projectID"), "must specify GCP project ID"))
}
if gcp.Region == "" {
allErrs = append(allErrs, field.Required(gcpPath.Child("region"), "must specify GCP region"))
}
}
switch {
case numberOfPlatforms == 0:
allErrs = append(allErrs, field.Required(platformPath, "must specify a platform"))
case numberOfPlatforms > 1:
allErrs = append(allErrs, field.Invalid(platformPath, newObject.Spec.Platform, "must specify only a single platform"))
}
if !canManageDNS && newObject.Spec.ManageDNS {
allErrs = append(allErrs, field.Invalid(specPath.Child("manageDNS"), newObject.Spec.ManageDNS, "cannot manage DNS for the selected platform"))
}
if len(allErrs) > 0 {
status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &status,
}
}
// If we get here, then all checks passed, so the object is valid.
contextLogger.Info("Successful validation")
return &admissionv1beta1.AdmissionResponse{
Allowed: true,
}
}
// validateUpdate specifically validates update operations for ClusterDeployment objects.
func (a *ClusterDeploymentValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
contextLogger := log.WithFields(log.Fields{
"operation": admissionSpec.Operation,
"group": admissionSpec.Resource.Group,
"version": admissionSpec.Resource.Version,
"resource": admissionSpec.Resource.Resource,
"method": "validateUpdate",
})
newObject := &hivev1.ClusterDeployment{}
err := json.Unmarshal(admissionSpec.Object.Raw, newObject)
if err != nil {
contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: err.Error(),
},
}
}
// Add the new data to the contextLogger
contextLogger.Data["object.Name"] = newObject.Name
oldObject := &hivev1.ClusterDeployment{}
err = json.Unmarshal(admissionSpec.OldObject.Raw, oldObject)
if err != nil {
contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error())
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: err.Error(),
},
}
}
// Add the new data to the contextLogger
contextLogger.Data["oldObject.Name"] = oldObject.Name
hasChangedImmutableField, changedFieldName := hasChangedImmutableField(&oldObject.Spec, &newObject.Spec)
if hasChangedImmutableField {
message := fmt.Sprintf("Attempted to change ClusterDeployment.Spec.%v. ClusterDeployment.Spec is immutable except for %v", changedFieldName, mutableFields)
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
// validate the newly incoming ingress
if ingressValidationResult := validateIngress(newObject, contextLogger); ingressValidationResult != nil {
return ingressValidationResult
}
// Now catch the case where there was a previously defined list and now it's being emptied
hasClearedOutPreviouslyDefinedIngressList := hasClearedOutPreviouslyDefinedIngressList(&oldObject.Spec, &newObject.Spec)
if hasClearedOutPreviouslyDefinedIngressList {
		message := "Previously defined a list of ingress objects, must provide a default ingress object"
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
// Check to make sure only allowed fields under controlPlaneConfig are being modified
if hasChangedImmutableControlPlaneConfigFields(&oldObject.Spec, &newObject.Spec) {
		message := "Attempt to modify immutable field in controlPlaneConfig (only servingCertificates is mutable)"
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
// Check that existing machinepools aren't modifying the labels and/or taints fields
hasChangedImmutableMachinePoolFields, computePoolName := hasChangedImmutableMachinePoolFields(&oldObject.Spec, &newObject.Spec)
if hasChangedImmutableMachinePoolFields {
message := fmt.Sprintf("Detected attempt to change Labels or Taints on existing Compute object: %s", computePoolName)
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
if oldObject.Spec.Installed && !newObject.Spec.Installed {
allErrs := field.ErrorList{}
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "installed"), newObject.Spec.Installed, "cannot make uninstalled once installed"))
contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation")
status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &status,
}
}
// If we get here, then all checks passed, so the object is valid.
contextLogger.Info("Successful validation")
return &admissionv1beta1.AdmissionResponse{
Allowed: true,
}
}
// isFieldMutable says whether the ClusterDeployment.spec field is meant to be mutable or not.
func isFieldMutable(value string) bool {
for _, mutableField := range mutableFields {
if value == mutableField {
return true
}
}
return false
}
// hasChangedImmutableField determines if a ClusterDeployment.spec immutable field was changed.
func hasChangedImmutableField(oldObject, newObject *hivev1.ClusterDeploymentSpec) (bool, string) {
ooElem := reflect.ValueOf(oldObject).Elem()
noElem := reflect.ValueOf(newObject).Elem()
for i := 0; i < ooElem.NumField(); i++ {
ooFieldName := ooElem.Type().Field(i).Name
ooValue := ooElem.Field(i).Interface()
noValue := noElem.Field(i).Interface()
if !isFieldMutable(ooFieldName) && !reflect.DeepEqual(ooValue, noValue) {
// The field isn't mutable -and- has been changed. DO NOT ALLOW.
return true, ooFieldName
}
}
return false, ""
}
// currently only allow controlPlaneConfig.servingCertificates to be mutable
func hasChangedImmutableControlPlaneConfigFields(origObject, newObject *hivev1.ClusterDeploymentSpec) bool {
origCopy := origObject.ControlPlaneConfig.DeepCopy()
newCopy := newObject.ControlPlaneConfig.DeepCopy()
// blank out the servingCertificates, since we don't care if they're different
origCopy.ServingCertificates = hivev1.ControlPlaneServingCertificateSpec{}
newCopy.ServingCertificates = hivev1.ControlPlaneServingCertificateSpec{}
if !reflect.DeepEqual(origCopy, newCopy) {
return true
}
return false
}
func hasChangedImmutableMachinePoolFields(oldObject, newObject *hivev1.ClusterDeploymentSpec) (bool, string) {
// any pre-existing compute machinepool should not mutate the Labels or Taints fields
for _, newMP := range newObject.Compute {
origMP := getOriginalMachinePool(oldObject.Compute, newMP.Name)
if origMP == nil {
// no mutate checks needed for new compute machinepool
continue
}
// Check if labels are being changed
if !reflect.DeepEqual(origMP.Labels, newMP.Labels) {
return true, newMP.Name
}
// Check if taints are being changed
if !reflect.DeepEqual(origMP.Taints, newMP.Taints) {
return true, newMP.Name
}
}
return false, ""
}
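// getOriginalMachinePool returns the machine pool with the given name from the original list, or nil when it does not exist.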
func getOriginalMachinePool(origMachinePools []hivev1.MachinePool, name string) *hivev1.MachinePool {
var origMP *hivev1.MachinePool
for i, mp := range origMachinePools {
if mp.Name == name {
origMP = &origMachinePools[i]
break
}
}
return origMP
}
func hasClearedOutPreviouslyDefinedIngressList(oldObject, newObject *hivev1.ClusterDeploymentSpec) bool {
// We don't allow a ClusterDeployment which had previously defined a list of Ingress objects
// to then be cleared out. It either must be cleared from the beginning (ie just use default behavior),
// or the ClusterDeployment must continue to define at least the 'default' ingress object.
if len(oldObject.Ingress) > 0 && len(newObject.Ingress) == 0 {
return true
}
return false
}
func validateIngressDomainsShareClusterDomain(newObject *hivev1.ClusterDeploymentSpec) bool {
// ingress entries must share the same domain as the cluster
// so watch for an ingress domain ending in: .<clusterName>.<baseDomain>
regexString := fmt.Sprintf(`(?i).*\.%s.%s$`, newObject.ClusterName, newObject.BaseDomain)
sharedSubdomain := regexp.MustCompile(regexString)
for _, ingress := range newObject.Ingress {
if !sharedSubdomain.Match([]byte(ingress.Domain)) {
return false
}
}
return true
}
func validateIngressDomainsNotWildcard(newObject *hivev1.ClusterDeploymentSpec) bool {
// check for domains with leading '*'
// the * is unnecessary as the ingress controller assumes a wildcard
for _, ingress := range newObject.Ingress {
if ingress.Domain[0] == '*' {
return false
}
}
return true
}
// empty ingress is allowed (for create), but if it's non-zero
// it must include an entry for 'default'
func validateIngressList(newObject *hivev1.ClusterDeploymentSpec) bool {
if len(newObject.Ingress) == 0 {
return true
}
defaultFound := false
for _, ingress := range newObject.Ingress {
if ingress.Name == "default" {
defaultFound = true
}
}
if !defaultFound {
return false
}
return true
}
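// readManagedDomainsFile reads the managed domains file at fileName and returns its non-empty
// lines with surrounding whitespace trimmed.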
func readManagedDomainsFile(fileName string) ([]string, error) {
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
result := []string{}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
s := scanner.Text()
s = strings.TrimSpace(s)
if len(s) > 0 {
result = append(result, s)
}
}
return result, nil
}
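// validateDomain returns true only when domain sits exactly one DNS label below one of
// validDomains. For example, with validDomains containing "example.com",
// "mycluster.example.com" is accepted, but "a.b.example.com" and "example.com" itself are not.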
func validateDomain(domain string, validDomains []string) bool {
for _, validDomain := range validDomains {
if strings.HasSuffix(domain, "."+validDomain) {
childPart := strings.TrimSuffix(domain, "."+validDomain)
if !strings.Contains(childPart, ".") {
return true
}
}
}
return false
}
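// validateIngress runs the ingress validations above against the new ClusterDeployment and returns
// a denying AdmissionResponse describing the first failure, or nil if all checks pass.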
func validateIngress(newObject *hivev1.ClusterDeployment, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse {
if !validateIngressList(&newObject.Spec) {
		message := "Ingress list must include a default entry"
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
if !validateIngressDomainsNotWildcard(&newObject.Spec) {
message := "Ingress domains must not lead with *"
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
if !validateIngressDomainsShareClusterDomain(&newObject.Spec) {
message := "Ingress domains must share the same domain as the cluster"
contextLogger.Infof("Failed validation: %v", message)
return &admissionv1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
Message: message,
},
}
}
// everything passed
return nil
}
| 1 | 8,137 | Why is this required? The user should be able to omit it and use the defaults. | openshift-hive | go |
@@ -1353,6 +1353,12 @@ class MouseSettingsPanel(SettingsPanel):
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
+ # Translators: This is the label for a checkbox in the
+ # mouse settings panel.
+ handleInjectedMouseControlText = _("Handle mouse control from other &applications")
+ self.handleInjectedMouseControlCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedMouseControlText))
+ self.handleInjectedMouseControlCheckBox.SetValue(config.conf["mouse"]["handleInjectedMouseControl"])
+
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked() | 1 | # -*- coding: UTF-8 -*-
#settingsDialogs.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Heiko Folkerts, Zahari Yurukov, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import glob
import os
import copy
import re
import wx
from wx.lib import scrolledpanel
from wx.lib.expando import ExpandoTextCtrl
import wx.lib.newevent
import winUser
import logHandler
import installer
from synthDriverHandler import *
import config
import languageHandler
import speech
import gui
from gui import nvdaControls
import globalVars
from logHandler import log
import nvwave
import audioDucking
import speechDictHandler
import appModuleHandler
import queueHandler
import braille
import brailleTables
import brailleInput
import core
import keyboardHandler
import characterProcessing
import guiHelper
try:
import updateCheck
except RuntimeError:
updateCheck = None
import inputCore
import nvdaControls
import touchHandler
import winVersion
import weakref
import time
class SettingsDialog(wx.Dialog):
"""A settings dialog.
A settings dialog consists of one or more settings controls and OK and Cancel buttons and an optional Apply button.
Action may be taken in response to the OK, Cancel or Apply buttons.
To use this dialog:
* Set L{title} to the title of the dialog.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, override L{postInit} to perform actions after the dialog is created, such as setting the focus. Be
aware that L{postInit} is also called by L{onApply}.
* Optionally, extend one or more of L{onOk}, L{onCancel} or L{onApply} to perform actions in response to the
OK, Cancel or Apply buttons, respectively.
@ivar title: The title of the dialog.
@type title: str
"""
class MultiInstanceError(RuntimeError): pass
_instances=weakref.WeakSet()
title = ""
shouldSuspendConfigProfileTriggers = True
def __new__(cls, *args, **kwargs):
if next((dlg for dlg in SettingsDialog._instances if isinstance(dlg,cls)),None) or (
SettingsDialog._instances and not kwargs.get('multiInstanceAllowed',False)
):
raise SettingsDialog.MultiInstanceError("Only one instance of SettingsDialog can exist at a time")
obj = super(SettingsDialog, cls).__new__(cls, *args, **kwargs)
SettingsDialog._instances.add(obj)
return obj
def __init__(self, parent,
resizeable=False,
hasApplyButton=False,
settingsSizerOrientation=wx.VERTICAL,
multiInstanceAllowed=False):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param resizeable: True if the settings dialog should be resizable by the user, only set this if
you have tested that the components resize correctly.
@type resizeable: bool
@param hasApplyButton: C{True} to add an apply button to the dialog; defaults to C{False} for backwards compatibility.
@type hasApplyButton: bool
@param settingsSizerOrientation: Either wx.VERTICAL or wx.HORIZONTAL. This controls the orientation of the
sizer that is passed into L{makeSettings}. The default is wx.VERTICAL.
@type settingsSizerOrientation: wx.Orientation
@param multiInstanceAllowed: Whether multiple instances of SettingsDialog may exist.
Note that still only one instance of a particular SettingsDialog subclass may exist at one time.
@type multiInstanceAllowed: bool
"""
if gui._isDebug():
startTime = time.time()
windowStyle = wx.DEFAULT_DIALOG_STYLE | (wx.RESIZE_BORDER if resizeable else 0)
super(SettingsDialog, self).__init__(parent, title=self.title, style=windowStyle)
self.hasApply = hasApplyButton
# the wx.Window must be constructed before we can get the handle.
import windowUtils
self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle())
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(settingsSizerOrientation)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL | wx.EXPAND, proportion=1)
self.mainSizer.Add(wx.StaticLine(self), flag=wx.EXPAND)
buttonSizer = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The Ok button on a NVDA dialog. This button will accept any changes and dismiss the dialog.
buttonSizer.addButton(self, label=_("OK"), id=wx.ID_OK)
# Translators: The cancel button on a NVDA dialog. This button will discard any changes and dismiss the dialog.
buttonSizer.addButton(self, label=_("Cancel"), id=wx.ID_CANCEL)
if hasApplyButton:
# Translators: The Apply button on a NVDA dialog. This button will accept any changes but will not dismiss the dialog.
buttonSizer.addButton(self, label=_("Apply"), id=wx.ID_APPLY)
self.mainSizer.Add(
buttonSizer.sizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL | wx.ALIGN_RIGHT
)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON, self.onApply, id=wx.ID_APPLY)
self.Bind(wx.EVT_CHAR_HOOK, self._enterActivatesOk_ctrlSActivatesApply)
self.postInit()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def _enterActivatesOk_ctrlSActivatesApply(self, evt):
"""Listens for keyboard input and triggers ok button on enter and triggers apply button when control + S is
pressed. Cancel behavior is built into wx.
Pressing enter will also close the dialog when a list has focus
(e.g. the list of symbols in the symbol pronunciation dialog).
Without this custom handler, enter would propagate to the list control (wx ticket #3725).
"""
if evt.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
elif self.hasApply and evt.UnicodeKey == ord(u'S') and evt.controlDown:
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_APPLY))
else:
evt.Skip()
def makeSettings(self, sizer):
"""Populate the dialog with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def postInit(self):
"""Called after the dialog has been created.
For example, this might be used to set focus to the desired control.
Sub-classes may override this method.
"""
def onOk(self, evt):
"""Take action in response to the OK button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_OK)
def onCancel(self, evt):
"""Take action in response to the Cancel button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_CANCEL)
def onApply(self, evt):
"""Take action in response to the Apply button being pressed.
Sub-classes may extend or override this method.
This base method should be called to run the postInit method.
"""
self.postInit()
self.SetReturnCode(wx.ID_APPLY)
def scaleSize(self, size):
"""Helper method to scale a size using the logical DPI
@param size: The size (x,y) as a tuple or a single numerical type to scale
@returns: The scaled size, returned as the same type"""
if isinstance(size, tuple):
return (self.scaleFactor * size[0], self.scaleFactor * size[1])
return self.scaleFactor * size
# An event and event binder that will notify the containers that they should
# redo the layout in whatever way makes sense for their particular content.
_RWLayoutNeededEvent, EVT_RW_LAYOUT_NEEDED = wx.lib.newevent.NewCommandEvent()
class SettingsPanel(wx.Panel):
"""A settings panel, to be used in a multi category settings dialog.
A settings panel consists of one or more settings controls.
Action may be taken in response to the parent dialog's OK or Cancel buttons.
To use this panel:
* Set L{title} to the title of the category.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, extend L{onPanelActivated} to perform actions after the category has been selected in the list of categories, such as synthesizer or braille display list population.
* Optionally, extend L{onPanelDeactivated} to perform actions after the category has been deselected (i.e. another category is selected) in the list of categories.
* Optionally, extend one or both of L{onSave} or L{onDiscard} to perform actions in response to the parent dialog's OK or Cancel buttons, respectively.
* Optionally, extend one or both of L{isValid} or L{postSave} to perform validation before or steps after saving, respectively.
@ivar title: The title of the settings panel, also listed in the list of settings categories.
@type title: str
"""
title=""
def __init__(self, parent):
"""
@param parent: The parent for this panel; C{None} for no parent.
@type parent: wx.Window
"""
if gui._isDebug():
startTime = time.time()
super(SettingsPanel, self).__init__(parent, wx.ID_ANY)
# the wx.Window must be constructed before we can get the handle.
import windowUtils
self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle())
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(wx.VERTICAL)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, flag=wx.ALL)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def makeSettings(self, sizer):
"""Populate the panel with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def onPanelActivated(self):
"""Called after the panel has been activated (i.e. de corresponding category is selected in the list of categories).
For example, this might be used for resource intensive tasks.
		Sub-classes should extend this method.
"""
self.Show()
def onPanelDeactivated(self):
"""Called after the panel has been deactivated (i.e. another category has been selected in the list of categories).
		Sub-classes should extend this method.
"""
self.Hide()
def onSave(self):
"""Take action in response to the parent's dialog OK or apply button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when OK is pressed.
"""
raise NotImplementedError
def isValid(self):
"""Evaluate whether the current circumstances of this panel are valid
and allow saving all the settings in a L{MultiCategorySettingsDialog}.
Sub-classes may extend this method.
@returns: C{True} if validation should continue,
C{False} otherwise.
@rtype: bool
"""
return True
def postSave(self):
"""Take action whenever saving settings for all panels in a L{MultiCategorySettingsDialog} succeeded.
Sub-classes may extend this method.
"""
def onDiscard(self):
"""Take action in response to the parent's dialog Cancel button being pressed.
Sub-classes may override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when Cancel is pressed.
"""
def _sendLayoutUpdatedEvent(self):
"""Notify any wx parents that may be listening that they should redo their layout in whatever way
makes sense for them. It is expected that sub-classes call this method in response to changes in
the number of GUI items in their panel.
"""
event = _RWLayoutNeededEvent(self.GetId())
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
def scaleSize(self, size):
"""Helper method to scale a size using the logical DPI
@param size: The size (x,y) as a tuple or a single numerical type to scale
@returns: The scaled size, returned as the same type"""
if isinstance(size, tuple):
return (self.scaleFactor * size[0], self.scaleFactor * size[1])
return self.scaleFactor * size
class MultiCategorySettingsDialog(SettingsDialog):
"""A settings dialog with multiple settings categories.
A multi category settings dialog consists of a list view with settings categories on the left side,
and a settings panel on the right side of the dialog.
Furthermore, in addition to Ok and Cancel buttons, it has an Apply button by default,
which is different from the default behavior of L{SettingsDialog}.
To use this dialog: set title and populate L{categoryClasses} with subclasses of SettingsPanel.
Make sure that L{categoryClasses} only contains panels that are available on a particular system.
For example, if a certain category of settings is only supported on Windows 10 and higher,
that category should be left out of L{categoryClasses}
"""
title=""
categoryClasses=[]
class CategoryUnavailableError(RuntimeError): pass
def __init__(self, parent, initialCategory=None):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param initialCategory: The initial category to select when opening this dialog
@type parent: SettingsPanel
"""
if initialCategory and not issubclass(initialCategory,SettingsPanel):
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise TypeError("initialCategory should be an instance of SettingsPanel")
if initialCategory and initialCategory not in self.categoryClasses:
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise MultiCategorySettingsDialog.CategoryUnavailableError(
"The provided initial category is not a part of this dialog"
)
self.initialCategory = initialCategory
self.currentCategory = None
self.setPostInitFocus = None
		# dictionary key is index of category in self.catListCtrl, value is the instance. Partially filled, check for KeyError
self.catIdToInstanceMap = {}
super(MultiCategorySettingsDialog, self).__init__(
parent,
resizeable=True,
hasApplyButton=True,
settingsSizerOrientation=wx.HORIZONTAL
)
# setting the size must be done after the parent is constructed.
self.SetMinSize(self.scaleSize(self.MIN_SIZE))
self.SetSize(self.scaleSize(self.INITIAL_SIZE))
# the size has changed, so recenter on the screen
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
# Initial / min size for the dialog. This size was chosen as a medium fit, so the
# smaller settings panels are not surrounded by too much space but most of
# the panels fit. Vertical scrolling is acceptable. Horizontal scrolling less
# so, the width was chosen to eliminate horizontal scroll bars. If a panel
	# exceeds the initial width, a debugWarning will be added to the log.
INITIAL_SIZE = (800, 480)
MIN_SIZE = (470, 240) # Min height required to show the OK, Cancel, Apply buttons
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list of categories in a multi category settings dialog.
categoriesLabelText=_("&Categories:")
categoriesLabel = wx.StaticText(self, label=categoriesLabelText)
# since the categories list and the container both expand in height, the y
# portion is essentially a "min" height.
# These sizes are set manually so that the initial proportions within the dialog look correct. If these sizes are
# not given, then I believe the proportion arguments (as given to the gridBagSizer.AddGrowableColumn) are used
# to set their relative sizes. We want the proportion argument to be used for resizing, but not the initial size.
catListDim = (150, 10)
catListDim = self.scaleSize(catListDim)
initialScaledWidth = self.scaleSize(self.INITIAL_SIZE[0])
spaceForBorderWidth = self.scaleSize(20)
catListWidth = catListDim[0]
containerDim = (initialScaledWidth - catListWidth - spaceForBorderWidth, self.scaleSize(10))
self.catListCtrl = nvdaControls.AutoWidthColumnListCtrl(
self,
autoSizeColumnIndex=0,
size=catListDim,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER
)
# This list consists of only one column.
# The provided column header is just a placeholder, as it is hidden due to the wx.LC_NO_HEADER style flag.
self.catListCtrl.InsertColumn(0,categoriesLabelText)
# Put the settings panel in a scrolledPanel, we don't know how large the settings panels might grow. If they exceed
		# the maximum size, it's important all items can be accessed visually.
# Save the ID for the panel, this panel will have its name changed when the categories are changed. This name is
# exposed via the IAccessibleName property.
global NvdaSettingsCategoryPanelId
NvdaSettingsCategoryPanelId = wx.NewId()
self.container = scrolledpanel.ScrolledPanel(
parent = self,
id = NvdaSettingsCategoryPanelId,
style = wx.TAB_TRAVERSAL | wx.BORDER_THEME,
size=containerDim
)
		# The min size is reset so that they can be reduced to below their "size" constraint.
self.container.SetMinSize((1,1))
self.catListCtrl.SetMinSize((1,1))
self.containerSizer = wx.BoxSizer(wx.VERTICAL)
self.container.SetSizer(self.containerSizer)
for cls in self.categoryClasses:
if not issubclass(cls,SettingsPanel):
raise RuntimeError("Invalid category class %s provided in %s.categoryClasses"%(cls.__name__,self.__class__.__name__))
# It's important here that the listItems are added to catListCtrl in the same order that they exist in categoryClasses.
# the ListItem index / Id is used to index categoryClasses, and used as the key in catIdToInstanceMap
self.catListCtrl.Append((cls.title,))
# populate the GUI with the initial category
initialCatIndex = 0 if not self.initialCategory else self.categoryClasses.index(self.initialCategory)
self._doCategoryChange(initialCatIndex)
self.catListCtrl.Select(initialCatIndex)
# we must focus the initial category in the category list.
self.catListCtrl.Focus(initialCatIndex)
self.setPostInitFocus = self.container.SetFocus if self.initialCategory else self.catListCtrl.SetFocus
self.gridBagSizer=gridBagSizer=wx.GridBagSizer(
hgap=guiHelper.SPACE_BETWEEN_BUTTONS_HORIZONTAL,
vgap=guiHelper.SPACE_BETWEEN_BUTTONS_VERTICAL
)
# add the label, the categories list, and the settings panel to a 2 by 2 grid.
# The label should span two columns, so that the start of the categories list
# and the start of the settings panel are at the same vertical position.
gridBagSizer.Add(categoriesLabel, pos=(0,0), span=(1,2))
gridBagSizer.Add(self.catListCtrl, pos=(1,0), flag=wx.EXPAND)
gridBagSizer.Add(self.container, pos=(1,1), flag=wx.EXPAND)
# Make the row with the listCtrl and settings panel grow vertically.
gridBagSizer.AddGrowableRow(1)
# Make the columns with the listCtrl and settings panel grow horizontally if the dialog is resized.
# They should grow 1:3, since the settings panel is much more important, and already wider
# than the listCtrl.
gridBagSizer.AddGrowableCol(0, proportion=1)
gridBagSizer.AddGrowableCol(1, proportion=3)
sHelper.sizer.Add(gridBagSizer, flag=wx.EXPAND, proportion=1)
self.container.Layout()
self.catListCtrl.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onCategoryChange)
self.Bind(wx.EVT_CHAR_HOOK, self.onCharHook)
self.Bind(EVT_RW_LAYOUT_NEEDED, self._onPanelLayoutChanged)
def _getCategoryPanel(self, catId):
panel = self.catIdToInstanceMap.get(catId, None)
if not panel:
try:
cls = self.categoryClasses[catId]
except IndexError:
raise ValueError("Unable to create panel for unknown category ID: {}".format(catId))
panel = cls(parent=self.container)
panel.Hide()
self.containerSizer.Add(panel, flag=wx.ALL, border=guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
self.catIdToInstanceMap[catId] = panel
panelWidth = panel.Size[0]
availableWidth = self.containerSizer.GetSize()[0]
if panelWidth > availableWidth and gui._isDebug():
log.debugWarning(
("Panel width ({1}) too large for: {0} Try to reduce the width of this panel, or increase width of " +
"MultiCategorySettingsDialog.MIN_SIZE"
).format(cls, panel.Size[0])
)
return panel
def postInit(self):
# By default after the dialog is created, focus lands on the button group for wx.Dialogs. However this is not where
# we want focus. We only want to modify focus after creation (makeSettings), but postInit is also called after
# onApply, so we reset the setPostInitFocus function.
if self.setPostInitFocus:
self.setPostInitFocus()
self.setPostInitFocus = None
else:
			# when postInit is called without a setPostInitFocus, i.e. because onApply was called,
# then set the focus to the listCtrl. This is a good starting point for a "fresh state"
self.catListCtrl.SetFocus()
def onCharHook(self,evt):
"""Listens for keyboard input and switches panels for control+tab"""
if not self.catListCtrl:
# Dialog has not yet been constructed.
# Allow another handler to take the event, and return early.
evt.Skip()
return
key = evt.GetKeyCode()
listHadFocus = self.catListCtrl.HasFocus()
if evt.ControlDown() and key==wx.WXK_TAB:
# Focus the categories list. If we don't, the panel won't hide correctly
if not listHadFocus:
self.catListCtrl.SetFocus()
index = self.catListCtrl.GetFirstSelected()
newIndex=index-1 if evt.ShiftDown() else index+1
# Less than first wraps to the last index, greater than last wraps to first index.
newIndex=newIndex % self.catListCtrl.ItemCount
self.catListCtrl.Select(newIndex)
# we must focus the new selection in the category list to trigger the change of category.
self.catListCtrl.Focus(newIndex)
if not listHadFocus and self.currentCategory:
self.currentCategory.SetFocus()
else:
evt.Skip()
def _onPanelLayoutChanged(self,evt):
		# call Layout and SetupScrolling on the container so that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# when child elements get smaller the scrolledPanel does not
# erase the old contents and must be redrawn
self.container.Refresh()
def _doCategoryChange(self, newCatId):
oldCat = self.currentCategory
# Freeze and Thaw are called to stop visual artifact's while the GUI
# is being rebuilt. Without this, the controls can sometimes be seen being
# added.
self.container.Freeze()
try:
newCat = self._getCategoryPanel(newCatId)
except ValueError as e:
newCatTitle = self.catListCtrl.GetItemText(newCatId)
log.error("Unable to change to category: {}".format(newCatTitle), exc_info=e)
return
if oldCat:
oldCat.onPanelDeactivated()
self.currentCategory = newCat
newCat.onPanelActivated()
		# call Layout and SetupScrolling on the container to make sure that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# Set the label for the container, this is exposed via the Name property on an NVDAObject.
		# For one reason or another, doing this before SetupScrolling causes this to be ignored by NVDA in some cases.
		# Translators: This is the label for a category within the settings dialog. It is announced when the user presses `ctrl+tab` or `ctrl+shift+tab` while focus is on a control within the NVDA settings dialog. The %s will be replaced with the name of the panel (e.g. General, Speech, Braille, etc.)
self.container.SetLabel(_("%s Settings Category")%newCat.title)
self.container.Thaw()
def onCategoryChange(self, evt):
currentCat = self.currentCategory
newIndex = evt.GetIndex()
if not currentCat or newIndex != self.categoryClasses.index(currentCat.__class__):
self._doCategoryChange(newIndex)
else:
evt.Skip()
def _doSave(self):
for panel in self.catIdToInstanceMap.itervalues():
if panel.isValid() is False:
raise ValueError("Validation for %s blocked saving settings" % panel.__class__.__name__)
for panel in self.catIdToInstanceMap.itervalues():
panel.onSave()
for panel in self.catIdToInstanceMap.itervalues():
panel.postSave()
def onOk(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
for panel in self.catIdToInstanceMap.itervalues():
panel.Destroy()
super(MultiCategorySettingsDialog,self).onOk(evt)
def onCancel(self,evt):
for panel in self.catIdToInstanceMap.itervalues():
panel.onDiscard()
panel.Destroy()
super(MultiCategorySettingsDialog,self).onCancel(evt)
def onApply(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
super(MultiCategorySettingsDialog,self).onApply(evt)
class GeneralSettingsPanel(SettingsPanel):
# Translators: This is the label for the general settings panel.
title = _("General")
LOG_LEVELS = (
# Translators: One of the log levels of NVDA (the info mode shows info as NVDA runs).
(log.INFO, _("info")),
# Translators: One of the log levels of NVDA (the debug warning shows debugging messages and warnings as NVDA runs).
(log.DEBUGWARNING, _("debug warning")),
# Translators: One of the log levels of NVDA (the input/output shows keyboard commands and/or braille commands as well as speech and/or braille output of NVDA).
(log.IO, _("input/output")),
# Translators: One of the log levels of NVDA (the debug mode shows debug messages as NVDA runs).
(log.DEBUG, _("debug"))
)
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.languageNames = languageHandler.getAvailableLanguages(presentational=True)
languageChoices = [x[1] for x in self.languageNames]
# Translators: The label for a setting in general settings to select NVDA's interface language (once selected, NVDA must be restarted; the option user default means the user's Windows language will be used).
languageLabelText = _("&Language (requires restart to fully take effect):")
self.languageList=settingsSizerHelper.addLabeledControl(languageLabelText, wx.Choice, choices=languageChoices)
self.languageList.SetToolTip(wx.ToolTip("Choose the language NVDA's messages and user interface should be presented in."))
try:
self.oldLanguage=config.conf["general"]["language"]
index=[x[0] for x in self.languageNames].index(self.oldLanguage)
self.languageList.SetSelection(index)
except:
pass
if globalVars.appArgs.secure:
self.languageList.Disable()
# Translators: The label for a setting in general settings to save current configuration when NVDA exits (if it is not checked, user needs to save configuration before quitting NVDA).
self.saveOnExitCheckBox=wx.CheckBox(self,label=_("&Save configuration on exit"))
self.saveOnExitCheckBox.SetValue(config.conf["general"]["saveConfigurationOnExit"])
if globalVars.appArgs.secure:
self.saveOnExitCheckBox.Disable()
settingsSizerHelper.addItem(self.saveOnExitCheckBox)
# Translators: The label for a setting in general settings to ask before quitting NVDA (if not checked, NVDA will exit without asking the user for action).
self.askToExitCheckBox=wx.CheckBox(self,label=_("Sho&w exit options when exiting NVDA"))
self.askToExitCheckBox.SetValue(config.conf["general"]["askToExit"])
settingsSizerHelper.addItem(self.askToExitCheckBox)
# Translators: The label for a setting in general settings to play sounds when NVDA starts or exits.
self.playStartAndExitSoundsCheckBox=wx.CheckBox(self,label=_("&Play sounds when starting or exiting NVDA"))
self.playStartAndExitSoundsCheckBox.SetValue(config.conf["general"]["playStartAndExitSounds"])
settingsSizerHelper.addItem(self.playStartAndExitSoundsCheckBox)
# Translators: The label for a setting in general settings to select logging level of NVDA as it runs (available options and what they are logged are found under comments for the logging level messages themselves).
logLevelLabelText=_("L&ogging level:")
logLevelChoices = [name for level, name in self.LOG_LEVELS]
self.logLevelList = settingsSizerHelper.addLabeledControl(logLevelLabelText, wx.Choice, choices=logLevelChoices)
curLevel = log.getEffectiveLevel()
for index, (level, name) in enumerate(self.LOG_LEVELS):
if level == curLevel:
self.logLevelList.SetSelection(index)
break
else:
log.debugWarning("Could not set log level list to current log level")
		# Translators: The label for a setting in general settings to allow NVDA to start after logging onto Windows (if checked, NVDA will start automatically after logging into Windows; if not, the user must start NVDA by pressing the shortcut key (CTRL+Alt+N by default)).
self.startAfterLogonCheckBox = wx.CheckBox(self, label=_("&Automatically start NVDA after I log on to Windows"))
self.startAfterLogonCheckBox.SetValue(config.getStartAfterLogon())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
settingsSizerHelper.addItem(self.startAfterLogonCheckBox)
# Translators: The label for a setting in general settings to allow NVDA to come up in Windows login screen (useful if user needs to enter passwords or if multiple user accounts are present to allow user to choose the correct account).
self.startOnLogonScreenCheckBox = wx.CheckBox(self, label=_("Use NVDA on the Windows logon screen (requires administrator privileges)"))
self.startOnLogonScreenCheckBox.SetValue(config.getStartOnLogonScreen())
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.startOnLogonScreenCheckBox.Disable()
settingsSizerHelper.addItem(self.startOnLogonScreenCheckBox)
# Translators: The label for a button in general settings to copy current user settings to system settings (to allow current settings to be used in secure screens such as User Account Control (UAC) dialog).
self.copySettingsButton= wx.Button(self, label=_("Use currently saved settings on the logon and other secure screens (requires administrator privileges)"))
self.copySettingsButton.Bind(wx.EVT_BUTTON,self.onCopySettings)
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.copySettingsButton.Disable()
settingsSizerHelper.addItem(self.copySettingsButton)
if updateCheck:
# Translators: The label of a checkbox in general settings to toggle automatic checking for updated versions of NVDA (if not checked, user must check for updates manually).
item=self.autoCheckForUpdatesCheckBox=wx.CheckBox(self,label=_("Automatically check for &updates to NVDA"))
item.Value=config.conf["update"]["autoCheck"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup"))
item.Value=config.conf["update"]["startupNotification"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle allowing of usage stats gathering
item=self.allowUsageStatsCheckBox=wx.CheckBox(self,label=_("Allow the NVDA project to gather NVDA usage statistics"))
item.Value=config.conf["update"]["allowUsageStats"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
def onCopySettings(self,evt):
for packageType in ('addons','appModules','globalPlugins','brailleDisplayDrivers','synthDrivers'):
if len(os.listdir(os.path.join(globalVars.appArgs.configPath,packageType)))>0:
if gui.messageBox(
# Translators: A message to warn the user when attempting to copy current settings to system settings.
_("Add-ons were detected in your user settings directory. Copying these to the system profile could be a security risk. Do you still wish to copy your settings?"),
# Translators: The title of the warning dialog displayed when trying to copy settings for use in secure screens.
_("Warning"),wx.YES|wx.NO|wx.ICON_WARNING,self
)==wx.NO:
return
break
progressDialog = gui.IndeterminateProgressDialog(gui.mainFrame,
# Translators: The title of the dialog presented while settings are being copied
_("Copying Settings"),
# Translators: The message displayed while settings are being copied to the system configuration (for use on Windows logon etc)
_("Please wait while settings are copied to the system configuration."))
while True:
try:
gui.ExecAndPump(config.setSystemConfigToCurrentConfig)
res=True
break
except installer.RetriableFailure:
log.debugWarning("Error when copying settings to system config",exc_info=True)
# Translators: a message dialog asking to retry or cancel when copying settings fails
message=_("Unable to copy a file. Perhaps it is currently being used by another process or you have run out of disc space on the drive you are copying to.")
# Translators: the title of a retry cancel dialog when copying settings fails
title=_("Error Copying")
if winUser.MessageBox(None,message,title,winUser.MB_RETRYCANCEL)==winUser.IDRETRY:
continue
res=False
break
except:
log.debugWarning("Error when copying settings to system config",exc_info=True)
res=False
break
progressDialog.done()
del progressDialog
if not res:
# Translators: The message displayed when errors were found while trying to copy current configuration to system settings.
gui.messageBox(_("Error copying NVDA user settings"),_("Error"),wx.OK|wx.ICON_ERROR,self)
else:
# Translators: The message displayed when copying configuration to system settings was successful.
gui.messageBox(_("Successfully copied NVDA user settings"),_("Success"),wx.OK|wx.ICON_INFORMATION,self)
def onSave(self):
newLanguage=[x[0] for x in self.languageNames][self.languageList.GetSelection()]
config.conf["general"]["language"]=newLanguage
config.conf["general"]["saveConfigurationOnExit"]=self.saveOnExitCheckBox.IsChecked()
config.conf["general"]["askToExit"]=self.askToExitCheckBox.IsChecked()
config.conf["general"]["playStartAndExitSounds"]=self.playStartAndExitSoundsCheckBox.IsChecked()
logLevel=self.LOG_LEVELS[self.logLevelList.GetSelection()][0]
config.conf["general"]["loggingLevel"]=logHandler.levelNames[logLevel]
logHandler.setLogLevelFromConfig()
if self.startAfterLogonCheckBox.IsEnabled():
config.setStartAfterLogon(self.startAfterLogonCheckBox.GetValue())
if self.startOnLogonScreenCheckBox.IsEnabled():
try:
config.setStartOnLogonScreen(self.startOnLogonScreenCheckBox.GetValue())
except (WindowsError, RuntimeError):
gui.messageBox(_("This change requires administrator privileges."), _("Insufficient Privileges"), style=wx.OK | wx.ICON_ERROR, parent=self)
if updateCheck:
config.conf["update"]["autoCheck"]=self.autoCheckForUpdatesCheckBox.IsChecked()
config.conf["update"]["allowUsageStats"]=self.allowUsageStatsCheckBox.IsChecked()
config.conf["update"]["startupNotification"]=self.notifyForPendingUpdateCheckBox.IsChecked()
updateCheck.terminate()
updateCheck.initialize()
def postSave(self):
if self.oldLanguage!=config.conf["general"]["language"]:
if gui.messageBox(
# Translators: The message displayed after NVDA interface language has been changed.
_("For the new language to take effect, the configuration must be saved and NVDA must be restarted. Press enter to save and restart NVDA, or cancel to manually save and exit at a later time."),
# Translators: The title of the dialog which appears when the user changed NVDA's interface language.
_("Language Configuration Change"),wx.OK|wx.CANCEL|wx.ICON_WARNING,self
)==wx.OK:
config.conf.save()
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
class SpeechSettingsPanel(SettingsPanel):
# Translators: This is the label for the speech panel
title = _("Speech")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the synthesizer on the speech panel.
synthLabel = _("&Synthesizer")
synthBox = wx.StaticBox(self, label=synthLabel)
synthGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(synthBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(synthGroup)
		# Use an ExpandoTextCtrl because even when readonly it accepts focus from keyboard, which
# standard readonly TextCtrl does not. ExpandoTextCtrl is a TE_MULTILINE control, however
# by default it renders as a single line. Standard TextCtrl with TE_MULTILINE has two lines,
		# and a vertical scroll bar. This is not necessary for the single line of text we wish to
# display here.
synthDesc = getSynth().description
self.synthNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=synthDesc, style=wx.TE_READONLY)
self.synthNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeSynth)
# Translators: This is the label for the button used to change synthesizer,
# it appears in the context of a synthesizer group on the speech settings panel.
changeSynthBtn = wx.Button(self, label=_("C&hange..."))
synthGroup.addItem(
guiHelper.associateElements(
self.synthNameCtrl,
changeSynthBtn
)
)
changeSynthBtn.Bind(wx.EVT_BUTTON,self.onChangeSynth)
self.voicePanel = VoiceSettingsPanel(self)
settingsSizerHelper.addItem(self.voicePanel)
def _enterTriggersOnChangeSynth(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeSynth(evt)
else:
evt.Skip()
def onChangeSynth(self, evt):
changeSynth = SynthesizerSelectionDialog(self, multiInstanceAllowed=True)
ret = changeSynth.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentSynth(self):
synthDesc = getSynth().description
self.synthNameCtrl.SetValue(synthDesc)
def onPanelActivated(self):
		# call super after all panel updates have been completed, we don't want the panel to show until this is complete.
self.voicePanel.onPanelActivated()
super(SpeechSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.voicePanel.onPanelDeactivated()
super(SpeechSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.voicePanel.onDiscard()
def onSave(self):
self.voicePanel.onSave()
class SynthesizerSelectionDialog(SettingsDialog):
# Translators: This is the label for the synthesizer selection dialog
title = _("Select Synthesizer")
synthNames = []
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label for the select
# synthesizer combobox in the synthesizer dialog.
synthListLabelText=_("&Synthesizer:")
self.synthList = settingsSizerHelper.addLabeledControl(synthListLabelText, wx.Choice, choices=[])
self.updateSynthesizerList()
# Translators: This is the label for the select output
# device combo in the synthesizer dialog. Examples of
# of an output device are default soundcard, usb
# headphones, etc.
deviceListLabelText = _("Output &device:")
deviceNames=nvwave.getOutputDeviceNames()
self.deviceList = settingsSizerHelper.addLabeledControl(deviceListLabelText, wx.Choice, choices=deviceNames)
try:
selection = deviceNames.index(config.conf["speech"]["outputDevice"])
except ValueError:
selection = 0
self.deviceList.SetSelection(selection)
# Translators: This is a label for the audio ducking combo box in the Synthesizer Settings dialog.
duckingListLabelText=_("Audio &ducking mode:")
self.duckingList=settingsSizerHelper.addLabeledControl(duckingListLabelText, wx.Choice, choices=audioDucking.audioDuckingModes)
index=config.conf['audio']['audioDuckingMode']
self.duckingList.SetSelection(index)
if not audioDucking.isAudioDuckingSupported():
self.duckingList.Disable()
def postInit(self):
# Finally, ensure that focus is on the synthlist
self.synthList.SetFocus()
def updateSynthesizerList(self):
driverList=getSynthList()
self.synthNames=[x[0] for x in driverList]
options=[x[1] for x in driverList]
self.synthList.Clear()
self.synthList.AppendItems(options)
try:
index=self.synthNames.index(getSynth().name)
self.synthList.SetSelection(index)
except:
pass
def onOk(self, evt):
if not self.synthNames:
# The list of synths has not been populated yet, so we didn't change anything in this panel
return
config.conf["speech"]["outputDevice"]=self.deviceList.GetStringSelection()
newSynth=self.synthNames[self.synthList.GetSelection()]
if not setSynth(newSynth):
# Translators: This message is presented when
# NVDA is unable to load the selected
# synthesizer.
gui.messageBox(_("Could not load the %s synthesizer.")%newSynth,_("Synthesizer Error"),wx.OK|wx.ICON_WARNING,self)
return
if audioDucking.isAudioDuckingSupported():
index=self.duckingList.GetSelection()
config.conf['audio']['audioDuckingMode']=index
audioDucking.setAudioDuckingMode(index)
if self.IsModal():
# Hack: we need to update the synth in our parent window before closing.
# Otherwise, NVDA will report the old synth even though the new synth is reflected visually.
self.Parent.updateCurrentSynth()
super(SynthesizerSelectionDialog, self).onOk(evt)
class SynthSettingChanger(object):
"""Functor which acts as calback for GUI events."""
def __init__(self,setting):
self.setting=setting
def __call__(self,evt):
val=evt.GetSelection()
setattr(getSynth(),self.setting.name,val)
class StringSynthSettingChanger(SynthSettingChanger):
"""Same as L{SynthSettingChanger} but handles combobox events."""
def __init__(self,setting,panel):
self.panel=panel
super(StringSynthSettingChanger,self).__init__(setting)
def __call__(self,evt):
if self.setting.name=="voice":
# Cancel speech first so that the voice will change immediately instead of the change being queued.
speech.cancelSpeech()
changeVoice(getSynth(),getattr(self.panel,"_%ss"%self.setting.name)[evt.GetSelection()].ID)
self.panel.updateVoiceSettings(changedSetting=self.setting.name)
else:
setattr(getSynth(),self.setting.name,getattr(self.panel,"_%ss"%self.setting.name)[evt.GetSelection()].ID)
class VoiceSettingsSlider(wx.Slider):
def __init__(self,*args, **kwargs):
super(VoiceSettingsSlider,self).__init__(*args,**kwargs)
self.Bind(wx.EVT_CHAR, self.onSliderChar)
def SetValue(self,i):
super(VoiceSettingsSlider, self).SetValue(i)
evt = wx.CommandEvent(wx.wxEVT_COMMAND_SLIDER_UPDATED,self.GetId())
evt.SetInt(i)
self.ProcessEvent(evt)
# HACK: Win events don't seem to be sent for certain explicitly set values,
# so send our own win event.
# This will cause duplicates in some cases, but NVDA will filter them out.
winUser.user32.NotifyWinEvent(winUser.EVENT_OBJECT_VALUECHANGE,self.Handle,winUser.OBJID_CLIENT,winUser.CHILDID_SELF)
def onSliderChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_UP:
newValue = min(self.Value + self.LineSize, self.Max)
elif key == wx.WXK_DOWN:
newValue = max(self.Value - self.LineSize, self.Min)
elif key == wx.WXK_PRIOR:
newValue = min(self.Value + self.PageSize, self.Max)
elif key == wx.WXK_NEXT:
newValue = max(self.Value - self.PageSize, self.Min)
elif key == wx.WXK_HOME:
newValue = self.Max
elif key == wx.WXK_END:
newValue = self.Min
else:
evt.Skip()
return
self.SetValue(newValue)
class VoiceSettingsPanel(SettingsPanel):
# Translators: This is the label for the voice settings panel.
title = _("Voice")
@classmethod
def _setSliderStepSizes(cls, slider, setting):
slider.SetLineSize(setting.minStep)
slider.SetPageSize(setting.largeStep)
def makeSettingControl(self,setting):
"""Constructs appropriate GUI controls for given L{SynthSetting} such as label and slider.
@param setting: Setting to construct controls for
@type setting: L{SynthSetting}
@returns: WXSizer containing newly created controls.
@rtype: L{wx.BoxSizer}
"""
sizer=wx.BoxSizer(wx.HORIZONTAL)
label=wx.StaticText(self,wx.ID_ANY,label="%s:"%setting.displayNameWithAccelerator)
slider=VoiceSettingsSlider(self,wx.ID_ANY,minValue=0,maxValue=100)
setattr(self,"%sSlider"%setting.name,slider)
slider.Bind(wx.EVT_SLIDER,SynthSettingChanger(setting))
self._setSliderStepSizes(slider,setting)
slider.SetValue(getattr(getSynth(),setting.name))
sizer.Add(label)
sizer.Add(slider)
if self.lastControl:
slider.MoveAfterInTabOrder(self.lastControl)
self.lastControl=slider
return sizer
def makeStringSettingControl(self,setting):
"""Same as L{makeSettingControl} but for string settings. Returns sizer with label and combobox."""
labelText="%s:"%setting.displayNameWithAccelerator
synth=getSynth()
setattr(self,"_%ss"%setting.name,getattr(synth,"available%ss"%setting.name.capitalize()).values())
		l=getattr(self,"_%ss"%setting.name)
labeledControl=guiHelper.LabeledControlHelper(self, labelText, wx.Choice, choices=[x.name for x in l])
lCombo = labeledControl.control
setattr(self,"%sList"%setting.name,lCombo)
try:
cur=getattr(synth,setting.name)
i=[x.ID for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
lCombo.Bind(wx.EVT_CHOICE,StringSynthSettingChanger(setting,self))
if self.lastControl:
lCombo.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lCombo
return labeledControl.sizer
def makeBooleanSettingControl(self,setting):
"""Same as L{makeSettingControl} but for boolean settings. Returns checkbox."""
checkbox=wx.CheckBox(self,wx.ID_ANY,label=setting.displayNameWithAccelerator)
setattr(self,"%sCheckbox"%setting.name,checkbox)
checkbox.Bind(wx.EVT_CHECKBOX,
lambda evt: setattr(getSynth(),setting.name,evt.IsChecked()))
checkbox.SetValue(getattr(getSynth(),setting.name))
if self.lastControl:
checkbox.MoveAfterInTabOrder(self.lastControl)
self.lastControl=checkbox
return checkbox
def onPanelActivated(self):
		if getSynth().name != self._synth.name:
if gui._isDebug():
log.debug("refreshing voice panel")
self.sizerDict.clear()
self.settingsSizer.Clear(deleteWindows=True)
self.makeSettings(self.settingsSizer)
super(VoiceSettingsPanel,self).onPanelActivated()
def makeSettings(self, settingsSizer):
self.sizerDict={}
self.lastControl=None
#Create controls for Synth Settings
self.updateVoiceSettings()
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
autoLanguageSwitchingText = _("Automatic language switching (when supported)")
self.autoLanguageSwitchingCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=autoLanguageSwitchingText))
self.autoLanguageSwitchingCheckbox.SetValue(config.conf["speech"]["autoLanguageSwitching"])
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, different voices for dialects will be used to read text in that dialect).
autoDialectSwitchingText =_("Automatic dialect switching (when supported)")
self.autoDialectSwitchingCheckbox=settingsSizerHelper.addItem(wx.CheckBox(self,label=autoDialectSwitchingText))
self.autoDialectSwitchingCheckbox.SetValue(config.conf["speech"]["autoDialectSwitching"])
# Translators: This is the label for a combobox in the
# voice settings panel (possible choices are none, some, most and all).
punctuationLabelText = _("Punctuation/symbol &level:")
symbolLevelLabels=characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
symbolLevelChoices =[symbolLevelLabels[level] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS]
self.symbolLevelList = settingsSizerHelper.addLabeledControl(punctuationLabelText, wx.Choice, choices=symbolLevelChoices)
curLevel = config.conf["speech"]["symbolLevel"]
self.symbolLevelList.SetSelection(characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS.index(curLevel))
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
trustVoiceLanguageText = _("Trust voice's language when processing characters and symbols")
self.trustVoiceLanguageCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=trustVoiceLanguageText))
self.trustVoiceLanguageCheckbox.SetValue(config.conf["speech"]["trustVoiceLanguage"])
# Translators: This is a label for a setting in voice settings (an edit box to change voice pitch for capital letters; the higher the value, the pitch will be higher).
capPitchChangeLabelText=_("Capital pitch change percentage")
self.capPitchChangeEdit=settingsSizerHelper.addLabeledControl(capPitchChangeLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["speech", getSynth().name, "capPitchChange"], "min")),
max=int(config.conf.getConfigValidationParameter(["speech", getSynth().name, "capPitchChange"], "max")),
initial=config.conf["speech"][getSynth().name]["capPitchChange"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
sayCapForCapsText = _("Say &cap before capitals")
self.sayCapForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self,label=sayCapForCapsText))
self.sayCapForCapsCheckBox.SetValue(config.conf["speech"][getSynth().name]["sayCapForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
beepForCapsText =_("&Beep for capitals")
self.beepForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = beepForCapsText))
self.beepForCapsCheckBox.SetValue(config.conf["speech"][getSynth().name]["beepForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
useSpellingFunctionalityText = _("Use &spelling functionality if supported")
self.useSpellingFunctionalityCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = useSpellingFunctionalityText))
self.useSpellingFunctionalityCheckBox.SetValue(config.conf["speech"][getSynth().name]["useSpellingFunctionality"])
def updateVoiceSettings(self, changedSetting=None):
"""Creates, hides or updates existing GUI controls for all of supported settings."""
synth=self._synth=getSynth()
#firstly check already created options
for name,sizer in self.sizerDict.iteritems():
if name == changedSetting:
# Changing a setting shouldn't cause that setting itself to disappear.
continue
if not synth.isSupported(name):
self.settingsSizer.Hide(sizer)
#Create new controls, update already existing
for setting in synth.supportedSettings:
if setting.name == changedSetting:
# Changing a setting shouldn't cause that setting's own values to change.
continue
if setting.name in self.sizerDict: #update a value
self.settingsSizer.Show(self.sizerDict[setting.name])
if isinstance(setting,NumericSynthSetting):
getattr(self,"%sSlider"%setting.name).SetValue(getattr(synth,setting.name))
elif isinstance(setting,BooleanSynthSetting):
getattr(self,"%sCheckbox"%setting.name).SetValue(getattr(synth,setting.name))
else:
l=getattr(self,"_%ss"%setting.name)
lCombo=getattr(self,"%sList"%setting.name)
try:
cur=getattr(synth,setting.name)
i=[x.ID for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
else: #create a new control
if isinstance(setting,NumericSynthSetting):
settingMaker=self.makeSettingControl
elif isinstance(setting,BooleanSynthSetting):
settingMaker=self.makeBooleanSettingControl
else:
settingMaker=self.makeStringSettingControl
s=settingMaker(setting)
self.sizerDict[setting.name]=s
self.settingsSizer.Insert(len(self.sizerDict)-1,s,border=10,flag=wx.BOTTOM)
#Update graphical layout of the dialog
self.settingsSizer.Layout()
def onDiscard(self):
#unbind change events for string settings as wx closes combo boxes on cancel
for setting in getSynth().supportedSettings:
if isinstance(setting,(NumericSynthSetting,BooleanSynthSetting)): continue
getattr(self,"%sList"%setting.name).Unbind(wx.EVT_CHOICE)
#restore settings
getSynth().loadSettings()
super(VoiceSettingsPanel,self).onDiscard()
def onSave(self):
synth = getSynth()
synth.saveSettings()
config.conf["speech"]["autoLanguageSwitching"]=self.autoLanguageSwitchingCheckbox.IsChecked()
config.conf["speech"]["autoDialectSwitching"]=self.autoDialectSwitchingCheckbox.IsChecked()
config.conf["speech"]["symbolLevel"]=characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS[self.symbolLevelList.GetSelection()]
config.conf["speech"]["trustVoiceLanguage"]=self.trustVoiceLanguageCheckbox.IsChecked()
config.conf["speech"][synth.name]["capPitchChange"]=self.capPitchChangeEdit.Value
config.conf["speech"][synth.name]["sayCapForCapitals"]=self.sayCapForCapsCheckBox.IsChecked()
config.conf["speech"][synth.name]["beepForCapitals"]=self.beepForCapsCheckBox.IsChecked()
config.conf["speech"][synth.name]["useSpellingFunctionality"]=self.useSpellingFunctionalityCheckBox.IsChecked()
class KeyboardSettingsPanel(SettingsPanel):
# Translators: This is the label for the keyboard settings panel.
title = _("Keyboard")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a combobox in the
# keyboard settings panel.
kbdLabelText = _("&Keyboard layout:")
layouts=keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames=sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList=sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
try:
index=self.kbdNames.index(config.conf['keyboard']['keyboardLayout'])
self.kbdList.SetSelection(index)
except:
log.debugWarning("Could not set Keyboard layout list to current layout",exc_info=True)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
capsAsNVDAText = _("Use CapsLock as an NVDA modifier key")
self.capsAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=capsAsNVDAText))
self.capsAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
numpadInsertAsModText = _("Use numpad Insert as an NVDA modifier key")
self.numpadInsertAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=numpadInsertAsModText))
self.numpadInsertAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
extendedInsertAsModText = _("Use extended Insert as an NVDA modifier key")
self.extendedInsertAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=extendedInsertAsModText))
self.extendedInsertAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
charsText = _("Speak typed &characters")
self.charsCheckBox=sHelper.addItem(wx.CheckBox(self,label=charsText))
self.charsCheckBox.SetValue(config.conf["keyboard"]["speakTypedCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speakTypedWordsText = _("Speak typed &words")
self.wordsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speakTypedWordsText))
self.wordsCheckBox.SetValue(config.conf["keyboard"]["speakTypedWords"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForCharText = _("Speech interrupt for typed characters")
self.speechInterruptForCharsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForCharText))
self.speechInterruptForCharsCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForEnterText = _("Speech interrupt for Enter key")
self.speechInterruptForEnterCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForEnterText))
self.speechInterruptForEnterCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForEnter"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
allowSkimReadingInSayAllText = _("Allow skim &reading in Say All")
self.skimReadingInSayAllCheckBox=sHelper.addItem(wx.CheckBox(self,label=allowSkimReadingInSayAllText))
self.skimReadingInSayAllCheckBox.SetValue(config.conf["keyboard"]["allowSkimReadingInSayAll"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
beepForLowercaseWithCapsLockText = _("Beep if typing lowercase letters when caps lock is on")
self.beepLowercaseCheckBox=sHelper.addItem(wx.CheckBox(self,label=beepForLowercaseWithCapsLockText))
self.beepLowercaseCheckBox.SetValue(config.conf["keyboard"]["beepForLowercaseWithCapslock"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
commandKeysText = _("Speak command &keys")
self.commandKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=commandKeysText))
self.commandKeysCheckBox.SetValue(config.conf["keyboard"]["speakCommandKeys"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
alertForSpellingErrorsText = _("Play sound for &spelling errors while typing")
self.alertForSpellingErrorsCheckBox=sHelper.addItem(wx.CheckBox(self,label=alertForSpellingErrorsText))
self.alertForSpellingErrorsCheckBox.SetValue(config.conf["keyboard"]["alertForSpellingErrors"])
if not config.conf["documentFormatting"]["reportSpellingErrors"]:
self.alertForSpellingErrorsCheckBox.Disable()
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
handleInjectedKeysText = _("Handle keys from other &applications")
self.handleInjectedKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedKeysText))
self.handleInjectedKeysCheckBox.SetValue(config.conf["keyboard"]["handleInjectedKeys"])
def isValid(self):
# #2871: check whether at least one key is the NVDA key.
if not self.capsAsNVDAModifierCheckBox.IsChecked() and not self.numpadInsertAsNVDAModifierCheckBox.IsChecked() and not self.extendedInsertAsNVDAModifierCheckBox.IsChecked():
log.debugWarning("No NVDA key set")
gui.messageBox(
# Translators: Message to report wrong configuration of the NVDA key
_("At least one key must be used as the NVDA key."),
# Translators: The title of the message box
_("Error"), wx.OK|wx.ICON_ERROR,self)
return False
return super(KeyboardSettingsPanel, self).isValid()
def onSave(self):
layout=self.kbdNames[self.kbdList.GetSelection()]
config.conf['keyboard']['keyboardLayout']=layout
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]=self.capsAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]=self.numpadInsertAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]=self.extendedInsertAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedCharacters"]=self.charsCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedWords"]=self.wordsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForCharacters"]=self.speechInterruptForCharsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForEnter"]=self.speechInterruptForEnterCheckBox.IsChecked()
config.conf["keyboard"]["allowSkimReadingInSayAll"]=self.skimReadingInSayAllCheckBox.IsChecked()
config.conf["keyboard"]["beepForLowercaseWithCapslock"]=self.beepLowercaseCheckBox.IsChecked()
config.conf["keyboard"]["speakCommandKeys"]=self.commandKeysCheckBox.IsChecked()
config.conf["keyboard"]["alertForSpellingErrors"]=self.alertForSpellingErrorsCheckBox.IsChecked()
config.conf["keyboard"]["handleInjectedKeys"]=self.handleInjectedKeysCheckBox.IsChecked()
class MouseSettingsPanel(SettingsPanel):
# Translators: This is the label for the mouse settings panel.
title = _("Mouse")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
shapeChangesText = _("Report mouse &shape changes")
self.shapeCheckBox=sHelper.addItem(wx.CheckBox(self,label=shapeChangesText))
self.shapeCheckBox.SetValue(config.conf["mouse"]["reportMouseShapeChanges"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
mouseTrackingText=_("Enable mouse &tracking")
self.mouseTrackingCheckBox=sHelper.addItem(wx.CheckBox(self,label=mouseTrackingText))
self.mouseTrackingCheckBox.SetValue(config.conf["mouse"]["enableMouseTracking"])
# Translators: This is the label for a combobox in the
# mouse settings panel.
textUnitLabelText=_("Text &unit resolution:")
import textInfos
self.textUnits=[textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD,textInfos.UNIT_LINE,textInfos.UNIT_PARAGRAPH]
textUnitsChoices = [textInfos.unitLabels[x] for x in self.textUnits]
self.textUnitComboBox=sHelper.addLabeledControl(textUnitLabelText, wx.Choice, choices=textUnitsChoices)
try:
index=self.textUnits.index(config.conf["mouse"]["mouseTextUnit"])
except:
index=0
self.textUnitComboBox.SetSelection(index)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
reportObjectRoleText = _("Report &role when mouse enters object")
self.reportObjectRoleCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportObjectRoleText))
self.reportObjectRoleCheckBox.SetValue(config.conf["mouse"]["reportObjectRoleOnMouseEnter"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioText = _("&Play audio coordinates when mouse moves")
self.audioCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioText))
self.audioCheckBox.SetValue(config.conf["mouse"]["audioCoordinatesOnMouseMove"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioDetectBrightnessText = _("&Brightness controls audio coordinates volume")
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked()
config.conf["mouse"]["mouseTextUnit"]=self.textUnits[self.textUnitComboBox.GetSelection()]
config.conf["mouse"]["reportObjectRoleOnMouseEnter"]=self.reportObjectRoleCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinatesOnMouseMove"]=self.audioCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinates_detectBrightness"]=self.audioDetectBrightnessCheckBox.IsChecked()
class ReviewCursorPanel(SettingsPanel):
# Translators: This is the label for the review cursor settings panel.
title = _("Review Cursor")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followFocusCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow system &focus"))
self.followFocusCheckBox.SetValue(config.conf["reviewCursor"]["followFocus"])
settingsSizer.Add(self.followFocusCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followCaretCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow System &Caret"))
self.followCaretCheckBox.SetValue(config.conf["reviewCursor"]["followCaret"])
settingsSizer.Add(self.followCaretCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followMouseCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow &mouse cursor"))
self.followMouseCheckBox.SetValue(config.conf["reviewCursor"]["followMouse"])
settingsSizer.Add(self.followMouseCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.simpleReviewModeCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Simple review mode"))
self.simpleReviewModeCheckBox.SetValue(config.conf["reviewCursor"]["simpleReviewMode"])
settingsSizer.Add(self.simpleReviewModeCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["reviewCursor"]["followFocus"]=self.followFocusCheckBox.IsChecked()
config.conf["reviewCursor"]["followCaret"]=self.followCaretCheckBox.IsChecked()
config.conf["reviewCursor"]["followMouse"]=self.followMouseCheckBox.IsChecked()
config.conf["reviewCursor"]["simpleReviewMode"]=self.simpleReviewModeCheckBox.IsChecked()
class InputCompositionPanel(SettingsPanel):
# Translators: This is the label for the Input Composition settings panel.
title = _("Input Composition")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.autoReportAllCandidatesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Automatically report all available &candidates"))
self.autoReportAllCandidatesCheckBox.SetValue(config.conf["inputComposition"]["autoReportAllCandidates"])
settingsSizer.Add(self.autoReportAllCandidatesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.announceSelectedCandidateCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Announce &selected candidate"))
self.announceSelectedCandidateCheckBox.SetValue(config.conf["inputComposition"]["announceSelectedCandidate"])
settingsSizer.Add(self.announceSelectedCandidateCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.candidateIncludesShortCharacterDescriptionCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Always include short character &description when announcing candidates"))
self.candidateIncludesShortCharacterDescriptionCheckBox.SetValue(config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"])
settingsSizer.Add(self.candidateIncludesShortCharacterDescriptionCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportReadingStringChangesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Report changes to the &reading string"))
self.reportReadingStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportReadingStringChanges"])
settingsSizer.Add(self.reportReadingStringChangesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportCompositionStringChangesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Report changes to the &composition string"))
self.reportCompositionStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportCompositionStringChanges"])
settingsSizer.Add(self.reportCompositionStringChangesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["inputComposition"]["autoReportAllCandidates"]=self.autoReportAllCandidatesCheckBox.IsChecked()
config.conf["inputComposition"]["announceSelectedCandidate"]=self.announceSelectedCandidateCheckBox.IsChecked()
config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]=self.candidateIncludesShortCharacterDescriptionCheckBox.IsChecked()
config.conf["inputComposition"]["reportReadingStringChanges"]=self.reportReadingStringChangesCheckBox.IsChecked()
config.conf["inputComposition"]["reportCompositionStringChanges"]=self.reportCompositionStringChangesCheckBox.IsChecked()
class ObjectPresentationPanel(SettingsPanel):
# Translators: This is the label for the object presentation panel.
title = _("Object Presentation")
progressLabels = (
# Translators: An option for progress bar output in the Object Presentation dialog
# which disables reporting of progress bars.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("off", _("off")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by speaking.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("speak", _("Speak")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("beep", _("Beep")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by both speaking and beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("both", _("Speak and beep")),
)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportToolTipsText = _("Report &tooltips")
self.tooltipCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportToolTipsText))
self.tooltipCheckBox.SetValue(config.conf["presentation"]["reportTooltips"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
balloonText = _("Report &help balloons")
self.balloonCheckBox=sHelper.addItem(wx.CheckBox(self,label=balloonText))
self.balloonCheckBox.SetValue(config.conf["presentation"]["reportHelpBalloons"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
shortcutText = _("Report object shortcut &keys")
self.shortcutCheckBox=sHelper.addItem(wx.CheckBox(self,label=shortcutText))
self.shortcutCheckBox.SetValue(config.conf["presentation"]["reportKeyboardShortcuts"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
positionInfoText = _("Report object &position information")
self.positionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=positionInfoText))
self.positionInfoCheckBox.SetValue(config.conf["presentation"]["reportObjectPositionInformation"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
guessPositionInfoText=_("Guess object &position information when unavailable")
self.guessPositionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=guessPositionInfoText))
self.guessPositionInfoCheckBox.SetValue(config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
descriptionText = _("Report object &descriptions")
self.descriptionCheckBox=sHelper.addItem(wx.CheckBox(self,label=descriptionText))
self.descriptionCheckBox.SetValue(config.conf["presentation"]["reportObjectDescriptions"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
progressLabelText = _("Progress &bar output:")
progressChoices = [name for setting, name in self.progressLabels]
self.progressList=sHelper.addLabeledControl(progressLabelText, wx.Choice, choices=progressChoices)
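# Select the entry matching the saved progress bar output mode; the for/else below logs a warning if no entry matches.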
for index, (setting, name) in enumerate(self.progressLabels):
if setting == config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]:
self.progressList.SetSelection(index)
break
else:
log.debugWarning("Could not set progress list to current report progress bar updates setting")
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportBackgroundProgressBarsText = _("Report background progress bars")
self.reportBackgroundProgressBarsCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportBackgroundProgressBarsText))
self.reportBackgroundProgressBarsCheckBox.SetValue(config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
dynamicContentText = _("Report dynamic &content changes")
self.dynamicContentCheckBox=sHelper.addItem(wx.CheckBox(self,label=dynamicContentText))
self.dynamicContentCheckBox.SetValue(config.conf["presentation"]["reportDynamicContentChanges"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
autoSuggestionsLabelText = _("Play a sound when &auto-suggestions appear")
self.autoSuggestionSoundsCheckBox=sHelper.addItem(wx.CheckBox(self,label=autoSuggestionsLabelText))
self.autoSuggestionSoundsCheckBox.SetValue(config.conf["presentation"]["reportAutoSuggestionsWithSound"])
def onSave(self):
config.conf["presentation"]["reportTooltips"]=self.tooltipCheckBox.IsChecked()
config.conf["presentation"]["reportHelpBalloons"]=self.balloonCheckBox.IsChecked()
config.conf["presentation"]["reportKeyboardShortcuts"]=self.shortcutCheckBox.IsChecked()
config.conf["presentation"]["reportObjectPositionInformation"]=self.positionInfoCheckBox.IsChecked()
config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"]=self.guessPositionInfoCheckBox.IsChecked()
config.conf["presentation"]["reportObjectDescriptions"]=self.descriptionCheckBox.IsChecked()
config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=self.progressLabels[self.progressList.GetSelection()][0]
config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]=self.reportBackgroundProgressBarsCheckBox.IsChecked()
config.conf["presentation"]["reportDynamicContentChanges"]=self.dynamicContentCheckBox.IsChecked()
config.conf["presentation"]["reportAutoSuggestionsWithSound"]=self.autoSuggestionSoundsCheckBox.IsChecked()
class BrowseModePanel(SettingsPanel):
# Translators: This is the label for the browse mode settings panel.
title = _("Browse Mode")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a textfield in the
# browse mode settings panel.
maxLengthLabel=wx.StaticText(self,-1,label=_("&Maximum number of characters on one line"))
settingsSizer.Add(maxLengthLabel)
self.maxLengthEdit=nvdaControls.SelectOnFocusSpinCtrl(self,
min=10, max=250, # min and max are not enforced in the config for virtualBuffers.maxLineLength
initial=config.conf["virtualBuffers"]["maxLineLength"])
settingsSizer.Add(self.maxLengthEdit,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
pageLinesLabel=wx.StaticText(self,-1,label=_("&Number of lines per page"))
settingsSizer.Add(pageLinesLabel)
self.pageLinesEdit=nvdaControls.SelectOnFocusSpinCtrl(self,
min=5, max=150, # min and max are not enforced in the config for virtualBuffers.linesPerPage
initial=config.conf["virtualBuffers"]["linesPerPage"])
settingsSizer.Add(self.pageLinesEdit,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.useScreenLayoutCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Use &screen layout (when supported)"))
self.useScreenLayoutCheckBox.SetValue(config.conf["virtualBuffers"]["useScreenLayout"])
settingsSizer.Add(self.useScreenLayoutCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoSayAllCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Automatic &Say All on page load"))
self.autoSayAllCheckBox.SetValue(config.conf["virtualBuffers"]["autoSayAllOnPageLoad"])
settingsSizer.Add(self.autoSayAllCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.layoutTablesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Include l&ayout tables"))
self.layoutTablesCheckBox.SetValue(config.conf["documentFormatting"]["includeLayoutTables"])
settingsSizer.Add(self.layoutTablesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoPassThroughOnFocusChangeCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatic focus mode for focus changes"))
self.autoPassThroughOnFocusChangeCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
settingsSizer.Add(self.autoPassThroughOnFocusChangeCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoPassThroughOnCaretMoveCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatic focus mode for caret movement"))
self.autoPassThroughOnCaretMoveCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
settingsSizer.Add(self.autoPassThroughOnCaretMoveCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.passThroughAudioIndicationCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Audio indication of focus and browse modes"))
self.passThroughAudioIndicationCheckBox.SetValue(config.conf["virtualBuffers"]["passThroughAudioIndication"])
settingsSizer.Add(self.passThroughAudioIndicationCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.trapNonCommandGesturesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("&Trap all non-command gestures from reaching the document"))
self.trapNonCommandGesturesCheckBox.SetValue(config.conf["virtualBuffers"]["trapNonCommandGestures"])
settingsSizer.Add(self.trapNonCommandGesturesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["virtualBuffers"]["maxLineLength"]=self.maxLengthEdit.GetValue()
config.conf["virtualBuffers"]["linesPerPage"]=self.pageLinesEdit.GetValue()
config.conf["virtualBuffers"]["useScreenLayout"]=self.useScreenLayoutCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoSayAllOnPageLoad"]=self.autoSayAllCheckBox.IsChecked()
config.conf["documentFormatting"]["includeLayoutTables"]=self.layoutTablesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]=self.autoPassThroughOnFocusChangeCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]=self.autoPassThroughOnCaretMoveCheckBox.IsChecked()
config.conf["virtualBuffers"]["passThroughAudioIndication"]=self.passThroughAudioIndicationCheckBox.IsChecked()
config.conf["virtualBuffers"]["trapNonCommandGestures"]=self.trapNonCommandGesturesCheckBox.IsChecked()
class DocumentFormattingPanel(SettingsPanel):
# Translators: This is the label for the document formatting panel.
title = _("Document Formatting")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label appearing on the document formatting settings panel.
panelText =_("The following options control the types of document formatting reported by NVDA.")
sHelper.addItem(wx.StaticText(self, label=panelText))
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
fontGroupText = _("Font")
fontGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=fontGroupText), wx.VERTICAL))
sHelper.addItem(fontGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontNameText = _("&Font name")
self.fontNameCheckBox=fontGroup.addItem(wx.CheckBox(self, label=fontNameText))
self.fontNameCheckBox.SetValue(config.conf["documentFormatting"]["reportFontName"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontSizeText = _("Font &size")
self.fontSizeCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontSizeText))
self.fontSizeCheckBox.SetValue(config.conf["documentFormatting"]["reportFontSize"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontAttributesText = _("Font attri&butes")
self.fontAttrsCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontAttributesText))
self.fontAttrsCheckBox.SetValue(config.conf["documentFormatting"]["reportFontAttributes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
emphasisText=_("E&mphasis")
self.emphasisCheckBox=fontGroup.addItem(wx.CheckBox(self,label=emphasisText))
self.emphasisCheckBox.SetValue(config.conf["documentFormatting"]["reportEmphasis"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
styleText =_("St&yle")
self.styleCheckBox=fontGroup.addItem(wx.CheckBox(self,label=styleText))
self.styleCheckBox.SetValue(config.conf["documentFormatting"]["reportStyle"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
colorsText = _("&Colors")
self.colorCheckBox=fontGroup.addItem(wx.CheckBox(self,label=colorsText))
self.colorCheckBox.SetValue(config.conf["documentFormatting"]["reportColor"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
documentInfoGroupText = _("Document information")
docInfoGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=documentInfoGroupText), wx.VERTICAL))
sHelper.addItem(docInfoGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
commentsText = _("Co&mments")
self.commentsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=commentsText))
self.commentsCheckBox.SetValue(config.conf["documentFormatting"]["reportComments"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
revisionsText = _("&Editor revisions")
self.revisionsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=revisionsText))
self.revisionsCheckBox.SetValue(config.conf["documentFormatting"]["reportRevisions"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
spellingErrorText = _("Spelling e&rrors")
self.spellingErrorsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=spellingErrorText))
self.spellingErrorsCheckBox.SetValue(config.conf["documentFormatting"]["reportSpellingErrors"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
pageAndSpaceGroupText = _("Pages and spacing")
pageAndSpaceGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=pageAndSpaceGroupText), wx.VERTICAL))
sHelper.addItem(pageAndSpaceGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
pageText = _("&Pages")
self.pageCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=pageText))
self.pageCheckBox.SetValue(config.conf["documentFormatting"]["reportPage"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
lineText = _("Line &numbers")
self.lineNumberCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineText))
self.lineNumberCheckBox.SetValue(config.conf["documentFormatting"]["reportLineNumber"])
# Translators: This is the label for a combobox controlling the reporting of line indentation in the
# Document Formatting dialog (possible choices are Off, Speech, Tones, or Both).
lineIndentationText = _("Line &indentation reporting:")
indentChoices=[
#Translators: A choice in a combo box in the document formatting dialog to report No line Indentation.
_("Off"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with Speech.
pgettext('line indentation setting', "Speech"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with tones.
_("Tones"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with both Speech and tones.
_("Both Speech and Tones")
]
self.lineIndentationCombo = pageAndSpaceGroup.addLabeledControl(lineIndentationText, wx.Choice, choices=indentChoices)
#We use bitwise operations because it saves us a four way if statement.
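# reportLineIndentation is bit 0 and reportLineIndentationWithTones is bit 1, so the resulting index maps onto indentChoices: 0 = off, 1 = speech, 2 = tones, 3 = both.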
curChoice = config.conf["documentFormatting"]["reportLineIndentationWithTones"] << 1 | config.conf["documentFormatting"]["reportLineIndentation"]
self.lineIndentationCombo.SetSelection(curChoice)
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report paragraph indentation if available.
paragraphIndentationText = _("&Paragraph indentation")
self.paragraphIndentationCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=paragraphIndentationText))
self.paragraphIndentationCheckBox.SetValue(config.conf["documentFormatting"]["reportParagraphIndentation"])
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report line spacing if available.
lineSpacingText=_("&Line spacing")
self.lineSpacingCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineSpacingText))
self.lineSpacingCheckBox.SetValue(config.conf["documentFormatting"]["reportLineSpacing"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
alignmentText = _("&Alignment")
self.alignmentCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=alignmentText))
self.alignmentCheckBox.SetValue(config.conf["documentFormatting"]["reportAlignment"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
tablesGroupText = _("Table information")
tablesGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=tablesGroupText), wx.VERTICAL))
sHelper.addItem(tablesGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tablesCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("&Tables")))
self.tablesCheckBox.SetValue(config.conf["documentFormatting"]["reportTables"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableHeadersCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Row/column h&eaders")))
self.tableHeadersCheckBox.SetValue(config.conf["documentFormatting"]["reportTableHeaders"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableCellCoordsCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Cell c&oordinates")))
self.tableCellCoordsCheckBox.SetValue(config.conf["documentFormatting"]["reportTableCellCoords"])
borderChoices=[
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Off"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Styles"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Both Colors and Styles"),
]
# Translators: This is the label for a combobox in the
# document formatting settings panel.
self.borderComboBox=tablesGroup.addLabeledControl(_("Cell borders:"), wx.Choice, choices=borderChoices)
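# Derive the combo selection from the two border config flags: 0 = off, 1 = styles only, 2 = both colors and styles.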
curChoice = 0
if config.conf["documentFormatting"]["reportBorderStyle"]:
if config.conf["documentFormatting"]["reportBorderColor"]:
curChoice = 2
else:
curChoice = 1
self.borderComboBox.SetSelection(curChoice)
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
elementsGroupText = _("Elements")
elementsGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=elementsGroupText), wx.VERTICAL))
sHelper.addItem(elementsGroup, flag=wx.EXPAND, proportion=1)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.headingsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Headings")))
self.headingsCheckBox.SetValue(config.conf["documentFormatting"]["reportHeadings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.linksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lin&ks")))
self.linksCheckBox.SetValue(config.conf["documentFormatting"]["reportLinks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.listsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Lists")))
self.listsCheckBox.SetValue(config.conf["documentFormatting"]["reportLists"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.blockQuotesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Block "es")))
self.blockQuotesCheckBox.SetValue(config.conf["documentFormatting"]["reportBlockQuotes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.landmarksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lan&dmarks")))
self.landmarksCheckBox.SetValue(config.conf["documentFormatting"]["reportLandmarks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.framesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Fra&mes")))
self.framesCheckBox.Value=config.conf["documentFormatting"]["reportFrames"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.clickableCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Clickable")))
self.clickableCheckBox.Value=config.conf["documentFormatting"]["reportClickable"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
detectFormatAfterCursorText =_("Report formatting changes after the cursor (can cause a lag)")
self.detectFormatAfterCursorCheckBox=wx.CheckBox(self, label=detectFormatAfterCursorText)
self.detectFormatAfterCursorCheckBox.SetValue(config.conf["documentFormatting"]["detectFormatAfterCursor"])
sHelper.addItem(self.detectFormatAfterCursorCheckBox)
def onSave(self):
config.conf["documentFormatting"]["detectFormatAfterCursor"]=self.detectFormatAfterCursorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontName"]=self.fontNameCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontSize"]=self.fontSizeCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontAttributes"]=self.fontAttrsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportColor"]=self.colorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportComments"]=self.commentsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportRevisions"]=self.revisionsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportEmphasis"]=self.emphasisCheckBox.IsChecked()
config.conf["documentFormatting"]["reportAlignment"]=self.alignmentCheckBox.IsChecked()
config.conf["documentFormatting"]["reportStyle"]=self.styleCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSpellingErrors"]=self.spellingErrorsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportPage"]=self.pageCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineNumber"]=self.lineNumberCheckBox.IsChecked()
choice = self.lineIndentationCombo.GetSelection()
config.conf["documentFormatting"]["reportLineIndentation"] = choice in (1, 3)
config.conf["documentFormatting"]["reportLineIndentationWithTones"] = choice in (2, 3)
config.conf["documentFormatting"]["reportParagraphIndentation"]=self.paragraphIndentationCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineSpacing"]=self.lineSpacingCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTables"]=self.tablesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableHeaders"]=self.tableHeadersCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableCellCoords"]=self.tableCellCoordsCheckBox.IsChecked()
choice = self.borderComboBox.GetSelection()
config.conf["documentFormatting"]["reportBorderStyle"] = choice in (1,2)
config.conf["documentFormatting"]["reportBorderColor"] = (choice == 2)
config.conf["documentFormatting"]["reportLinks"]=self.linksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHeadings"]=self.headingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLists"]=self.listsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportBlockQuotes"]=self.blockQuotesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLandmarks"]=self.landmarksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFrames"]=self.framesCheckBox.Value
config.conf["documentFormatting"]["reportClickable"]=self.clickableCheckBox.Value
class TouchInteractionPanel(SettingsPanel):
# Translators: This is the label for the touch interaction settings panel.
title = _("Touch Interaction")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
self.touchTypingCheckBox=wx.CheckBox(self,wx.NewId(),label=_("&Touch typing mode"))
self.touchTypingCheckBox.SetValue(config.conf["touch"]["touchTyping"])
settingsSizer.Add(self.touchTypingCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["touch"]["touchTyping"]=self.touchTypingCheckBox.IsChecked()
class UwpOcrPanel(SettingsPanel):
# Translators: The title of the Windows 10 OCR panel.
title = _("Windows 10 OCR")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Lazily import this.
from contentRecog import uwpOcr
self.languageCodes = uwpOcr.getLanguages()
languageChoices = [
languageHandler.getLanguageDescription(languageHandler.normalizeLanguage(lang))
for lang in self.languageCodes]
# Translators: Label for an option in the Windows 10 OCR dialog.
languageLabel = _("Recognition &language:")
self.languageChoice = sHelper.addLabeledControl(languageLabel, wx.Choice, choices=languageChoices)
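# Select the configured recognition language; fall back to the first entry if the configured code is not in the list of available languages.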
try:
langIndex = self.languageCodes.index(config.conf["uwpOcr"]["language"])
self.languageChoice.Selection = langIndex
except ValueError:
self.languageChoice.Selection = 0
def onSave(self):
lang = self.languageCodes[self.languageChoice.Selection]
config.conf["uwpOcr"]["language"] = lang
class DictionaryEntryDialog(wx.Dialog):
TYPE_LABELS = {
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_ANYWHERE: _("&Anywhere"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_WORD: _("Whole &word"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_REGEXP: _("Regular &expression")
}
TYPE_LABELS_ORDERING = (speechDictHandler.ENTRY_TYPE_ANYWHERE, speechDictHandler.ENTRY_TYPE_WORD, speechDictHandler.ENTRY_TYPE_REGEXP)
# Translators: This is the label for the edit dictionary entry dialog.
def __init__(self, parent, title=_("Edit Dictionary Entry")):
super(DictionaryEntryDialog,self).__init__(parent,title=title)
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is a label for an edit field in add dictionary entry dialog.
patternLabelText = _("&Pattern")
self.patternTextCtrl=sHelper.addLabeledControl(patternLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog and in punctuation/symbol pronunciation dialog.
replacementLabelText = _("&Replacement")
self.replacementTextCtrl=sHelper.addLabeledControl(replacementLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog.
commentLabelText = _("&Comment")
self.commentTextCtrl=sHelper.addLabeledControl(commentLabelText, wx.TextCtrl)
# Translators: This is a label for a checkbox in add dictionary entry dialog.
caseSensitiveText = _("Case &sensitive")
self.caseSensitiveCheckBox=sHelper.addItem(wx.CheckBox(self,label=caseSensitiveText))
# Translators: This is a label for a set of radio buttons in add dictionary entry dialog.
typeText = _("&Type")
typeChoices = [DictionaryEntryDialog.TYPE_LABELS[i] for i in DictionaryEntryDialog.TYPE_LABELS_ORDERING]
self.typeRadioBox=sHelper.addItem(wx.RadioBox(self,label=typeText, choices=typeChoices))
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK|wx.CANCEL))
mainSizer.Add(sHelper.sizer,border=20,flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.setType(speechDictHandler.ENTRY_TYPE_ANYWHERE)
self.patternTextCtrl.SetFocus()
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
def getType(self):
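# Return the entry type selected in the radio box, defaulting to "anywhere" when nothing is selected.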
typeRadioValue = self.typeRadioBox.GetSelection()
if typeRadioValue == wx.NOT_FOUND:
return speechDictHandler.ENTRY_TYPE_ANYWHERE
return DictionaryEntryDialog.TYPE_LABELS_ORDERING[typeRadioValue]
def onOk(self,evt):
if not self.patternTextCtrl.GetValue():
# Translators: This is an error message to let the user know that the pattern field in the dictionary entry is not valid.
gui.messageBox(_("A pattern is required."), _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
self.patternTextCtrl.SetFocus()
return
try:
self.dictEntry=speechDictHandler.SpeechDictEntry(self.patternTextCtrl.GetValue(),self.replacementTextCtrl.GetValue(),self.commentTextCtrl.GetValue(),bool(self.caseSensitiveCheckBox.GetValue()),self.getType())
except Exception as e:
log.debugWarning("Could not add dictionary entry due to (regex error) : %s" % e)
# Translators: This is an error message to let the user know that the dictionary entry is not valid.
gui.messageBox(_("Regular Expression error: \"%s\".")%e, _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
return
evt.Skip()
def setType(self, type):
self.typeRadioBox.SetSelection(DictionaryEntryDialog.TYPE_LABELS_ORDERING.index(type))
class DictionaryDialog(SettingsDialog):
TYPE_LABELS = {t: l.replace("&", "") for t, l in DictionaryEntryDialog.TYPE_LABELS.iteritems()}
def __init__(self,parent,title,speechDict):
self.title = title
self.speechDict = speechDict
self.tempSpeechDict=speechDictHandler.SpeechDict()
self.tempSpeechDict.extend(self.speechDict)
globalVars.speechDictionaryProcessing=False
super(DictionaryDialog, self).__init__(parent)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the combo box of dictionary entries in speech dictionary dialog.
entriesLabelText=_("&Dictionary entries")
self.dictList=sHelper.addLabeledControl(entriesLabelText, wx.ListCtrl, style=wx.LC_REPORT|wx.LC_SINGLE_SEL,size=(550,350))
# Translators: The label for a column in dictionary entries list used to identify comments for the entry.
self.dictList.InsertColumn(0,_("Comment"),width=150)
# Translators: The label for a column in dictionary entries list used to identify pattern (original word or a pattern).
self.dictList.InsertColumn(1,_("Pattern"),width=150)
# Translators: The label for a column in dictionary entries list and in a list of symbols from symbol pronunciation dialog used to identify replacement for a pattern or a symbol
self.dictList.InsertColumn(2,_("Replacement"),width=150)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is case sensitive or not.
self.dictList.InsertColumn(3,_("case"),width=50)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is a regular expression, matches whole words, or matches anywhere.
self.dictList.InsertColumn(4,_("Type"),width=50)
self.offOn = (_("off"),_("on"))
for entry in self.tempSpeechDict:
self.dictList.Append((entry.comment,entry.pattern,entry.replacement,self.offOn[int(entry.caseSensitive)],DictionaryDialog.TYPE_LABELS[entry.type]))
self.editingIndex=-1
bHelper = guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
addButtonID=wx.NewId()
# Translators: The label for a button in speech dictionaries dialog to add new entries.
bHelper.addButton(self, addButtonID,_("&Add"),wx.DefaultPosition)
editButtonID=wx.NewId()
# Translators: The label for a button in speech dictionaries dialog to edit existing entries.
bHelper.addButton(self, editButtonID,_("&Edit"),wx.DefaultPosition)
removeButtonID=wx.NewId()
bHelper.addButton(self, removeButtonID,_("&Remove"),wx.DefaultPosition)
sHelper.addItem(bHelper)
self.Bind(wx.EVT_BUTTON,self.OnAddClick,id=addButtonID)
self.Bind(wx.EVT_BUTTON,self.OnEditClick,id=editButtonID)
self.Bind(wx.EVT_BUTTON,self.OnRemoveClick,id=removeButtonID)
def postInit(self):
self.dictList.SetFocus()
def onCancel(self,evt):
globalVars.speechDictionaryProcessing=True
super(DictionaryDialog, self).onCancel(evt)
def onOk(self,evt):
globalVars.speechDictionaryProcessing=True
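# Write the edited entries back to the real dictionary and save it only if the temporary working copy was actually changed.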
if self.tempSpeechDict!=self.speechDict:
del self.speechDict[:]
self.speechDict.extend(self.tempSpeechDict)
self.speechDict.save()
super(DictionaryDialog, self).onOk(evt)
def OnAddClick(self,evt):
# Translators: This is the label for the add dictionary entry dialog.
entryDialog=DictionaryEntryDialog(self,title=_("Add Dictionary Entry"))
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict.append(entryDialog.dictEntry)
self.dictList.Append((entryDialog.commentTextCtrl.GetValue(),entryDialog.patternTextCtrl.GetValue(),entryDialog.replacementTextCtrl.GetValue(),self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())],DictionaryDialog.TYPE_LABELS[entryDialog.getType()]))
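# Clear any existing selection so that only the newly added entry ends up selected and focused.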
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.Select(index,on=0)
index=self.dictList.GetNextSelected(index)
addedIndex=self.dictList.GetItemCount()-1
self.dictList.Select(addedIndex)
self.dictList.Focus(addedIndex)
self.dictList.SetFocus()
entryDialog.Destroy()
def OnEditClick(self,evt):
if self.dictList.GetSelectedItemCount()!=1:
return
editIndex=self.dictList.GetFirstSelected()
if editIndex<0:
return
entryDialog=DictionaryEntryDialog(self)
entryDialog.patternTextCtrl.SetValue(self.tempSpeechDict[editIndex].pattern)
entryDialog.replacementTextCtrl.SetValue(self.tempSpeechDict[editIndex].replacement)
entryDialog.commentTextCtrl.SetValue(self.tempSpeechDict[editIndex].comment)
entryDialog.caseSensitiveCheckBox.SetValue(self.tempSpeechDict[editIndex].caseSensitive)
entryDialog.setType(self.tempSpeechDict[editIndex].type)
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict[editIndex]=entryDialog.dictEntry
self.dictList.SetStringItem(editIndex,0,entryDialog.commentTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,1,entryDialog.patternTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,2,entryDialog.replacementTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,3,self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())])
self.dictList.SetStringItem(editIndex,4,DictionaryDialog.TYPE_LABELS[entryDialog.getType()])
self.dictList.SetFocus()
entryDialog.Destroy()
def OnRemoveClick(self,evt):
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.DeleteItem(index)
del self.tempSpeechDict[index]
index=self.dictList.GetNextSelected(index)
self.dictList.SetFocus()
class BrailleSettingsPanel(SettingsPanel):
# Translators: This is the label for the braille panel
title = _("Braille")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the braille display on the braille panel.
displayLabel = _("Braille &display")
displayBox = wx.StaticBox(self, label=displayLabel)
displayGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(displayBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(displayGroup)
displayDesc = braille.handler.display.description
self.displayNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=displayDesc, style=wx.TE_READONLY)
# Translators: This is the label for the button used to change braille display,
# it appears in the context of a braille display group on the braille settings panel.
changeDisplayBtn = wx.Button(self, label=_("C&hange..."))
displayGroup.addItem(
guiHelper.associateElements(
self.displayNameCtrl,
changeDisplayBtn
)
)
self.displayNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeDisplay)
changeDisplayBtn.Bind(wx.EVT_BUTTON,self.onChangeDisplay)
self.brailleSubPanel = BrailleSettingsSubPanel(self)
settingsSizerHelper.addItem(self.brailleSubPanel)
def _enterTriggersOnChangeDisplay(self, evt):
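# Pressing Enter while the read-only display name field has focus opens the display selection dialog; all other keys are processed normally.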
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeDisplay(evt)
else:
evt.Skip()
def onChangeDisplay(self, evt):
changeDisplay = BrailleDisplaySelectionDialog(self, multiInstanceAllowed=True)
ret = changeDisplay.ShowModal()
if ret == wx.ID_OK:
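# Freeze the panel while its settings are refreshed so the rebuild does not flicker; Thaw is called once the layout update event has been sent.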
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentDisplay(self):
displayDesc = braille.handler.display.description
self.displayNameCtrl.SetValue(displayDesc)
def onPanelActivated(self):
self.brailleSubPanel.onPanelActivated()
super(BrailleSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.brailleSubPanel.onPanelDeactivated()
super(BrailleSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.brailleSubPanel.onDiscard()
def onSave(self):
self.brailleSubPanel.onSave()
class BrailleDisplaySelectionDialog(SettingsDialog):
# Translators: This is the label for the braille display selection dialog.
title = _("Select Braille Display")
displayNames = []
possiblePorts = []
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for a setting in braille settings to choose a braille display.
displayLabelText = _("Braille &display:")
self.displayList = sHelper.addLabeledControl(displayLabelText, wx.Choice, choices=[])
self.Bind(wx.EVT_CHOICE, self.onDisplayNameChanged, self.displayList)
# Translators: The label for a setting in braille settings to choose the connection port (if the selected braille display supports port selection).
portsLabelText = _("&Port:")
self.portsList = sHelper.addLabeledControl(portsLabelText, wx.Choice, choices=[])
self.updateBrailleDisplayLists()
def postInit(self):
# Finally, ensure that focus is on the list of displays.
self.displayList.SetFocus()
def updateBrailleDisplayLists(self):
driverList = braille.getDisplayList()
self.displayNames = [driver[0] for driver in driverList]
displayChoices = [driver[1] for driver in driverList]
self.displayList.Clear()
self.displayList.AppendItems(displayChoices)
try:
selection = self.displayNames.index(braille.handler.display.name)
self.displayList.SetSelection(selection)
except:
pass
self.updatePossiblePorts()
def updatePossiblePorts(self):
displayName = self.displayNames[self.displayList.GetSelection()]
displayCls = braille._getDisplayDriver(displayName)
self.possiblePorts = []
try:
self.possiblePorts.extend(displayCls.getPossiblePorts().iteritems())
except NotImplementedError:
pass
if self.possiblePorts:
self.portsList.SetItems([p[1] for p in self.possiblePorts])
try:
selectedPort = config.conf["braille"][displayName].get("port")
portNames = [p[0] for p in self.possiblePorts]
selection = portNames.index(selectedPort)
except (KeyError, ValueError):
# Display name not in config or port not valid
selection = 0
self.portsList.SetSelection(selection)
# If no port selection is possible or only automatic selection is available, disable the port selection control
enable = len(self.possiblePorts) > 0 and not (len(self.possiblePorts) == 1 and self.possiblePorts[0][0] == "auto")
self.portsList.Enable(enable)
def onDisplayNameChanged(self, evt):
self.updatePossiblePorts()
def onOk(self, evt):
if not self.displayNames:
# The list of displays has not been populated yet, so we didn't change anything in this dialog
return
display = self.displayNames[self.displayList.GetSelection()]
if display not in config.conf["braille"]:
config.conf["braille"][display] = {}
if self.possiblePorts:
port = self.possiblePorts[self.portsList.GetSelection()][0]
config.conf["braille"][display]["port"] = port
if not braille.handler.setDisplayByName(display):
gui.messageBox(_("Could not load the %s display.")%display, _("Braille Display Error"), wx.OK|wx.ICON_WARNING, self)
return
if self.IsModal():
# Hack: we need to update the display in our parent window before closing.
# Otherwise, NVDA will report the old display even though the new display is reflected visually.
self.Parent.updateCurrentDisplay()
super(BrailleDisplaySelectionDialog, self).onOk(evt)
class BrailleSettingsSubPanel(SettingsPanel):
def makeSettings(self, settingsSizer):
if gui._isDebug():
startTime = time.time()
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
tables = brailleTables.listTables()
# Translators: The label for a setting in braille settings to select the output table (the braille table used to read braille text on the braille display).
outputsLabelText = _("&Output table:")
outTables = [table for table in tables if table.output]
self.outTableNames = [table.fileName for table in outTables]
outTableChoices = [table.displayName for table in outTables]
self.outTableList = sHelper.addLabeledControl(outputsLabelText, wx.Choice, choices=outTableChoices)
try:
selection = self.outTableNames.index(config.conf["braille"]["translationTable"])
self.outTableList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading output tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to select the input table (the braille table used to type braille characters on a braille keyboard).
inputLabelText = _("&Input table:")
self.inTables = [table for table in tables if table.input]
inTableChoices = [table.displayName for table in self.inTables]
self.inTableList = sHelper.addLabeledControl(inputLabelText, wx.Choice, choices=inTableChoices)
try:
selection = self.inTables.index(brailleInput.handler.table)
self.inTableList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading input tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to expand the current word under cursor to computer braille.
expandAtCursorText = _("E&xpand to computer braille for the word at the cursor")
self.expandAtCursorCheckBox = sHelper.addItem(wx.CheckBox(self, wx.ID_ANY, label=expandAtCursorText))
self.expandAtCursorCheckBox.SetValue(config.conf["braille"]["expandAtCursor"])
# Translators: The label for a setting in braille settings to show the cursor.
showCursorLabelText = _("&Show cursor")
self.showCursorCheckBox = sHelper.addItem(wx.CheckBox(self, label=showCursorLabelText))
self.showCursorCheckBox.Bind(wx.EVT_CHECKBOX, self.onShowCursorChange)
self.showCursorCheckBox.SetValue(config.conf["braille"]["showCursor"])
# Translators: The label for a setting in braille settings to enable cursor blinking.
cursorBlinkLabelText = _("Blink cursor")
self.cursorBlinkCheckBox = sHelper.addItem(wx.CheckBox(self, label=cursorBlinkLabelText))
self.cursorBlinkCheckBox.Bind(wx.EVT_CHECKBOX, self.onBlinkCursorChange)
self.cursorBlinkCheckBox.SetValue(config.conf["braille"]["cursorBlink"])
if not self.showCursorCheckBox.GetValue():
self.cursorBlinkCheckBox.Disable()
# Translators: The label for a setting in braille settings to change cursor blink rate in milliseconds (1 second is 1000 milliseconds).
cursorBlinkRateLabelText = _("Cursor blink rate (ms)")
minBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "min"))
maxBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "max"))
self.cursorBlinkRateEdit = sHelper.addLabeledControl(cursorBlinkRateLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=minBlinkRate, max=maxBlinkRate, initial=config.conf["braille"]["cursorBlinkRate"])
if not self.showCursorCheckBox.GetValue() or not self.cursorBlinkCheckBox.GetValue() :
self.cursorBlinkRateEdit.Disable()
self.cursorShapes = [s[0] for s in braille.CURSOR_SHAPES]
cursorShapeChoices = [s[1] for s in braille.CURSOR_SHAPES]
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to focus.
cursorShapeFocusLabelText = _("Cursor shape for &focus:")
self.cursorShapeFocusList = sHelper.addLabeledControl(cursorShapeFocusLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeFocus"])
self.cursorShapeFocusList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeFocusList.Disable()
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to review.
cursorShapeReviewLabelText = _("Cursor shape for &review:")
self.cursorShapeReviewList = sHelper.addLabeledControl(cursorShapeReviewLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeReview"])
self.cursorShapeReviewList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeReviewList.Disable()
if gui._isDebug():
log.debug("Loading cursor settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to change how long a message stays on the braille display (in seconds).
messageTimeoutText = _("Message &timeout (sec)")
self.messageTimeoutEdit = sHelper.addLabeledControl(messageTimeoutText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "min")),
max=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "max")),
initial=config.conf["braille"]["messageTimeout"])
# Translators: The label for a setting in braille settings to display a message on the braille display indefinitely.
noMessageTimeoutLabelText = _("Show &messages indefinitely")
self.noMessageTimeoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=noMessageTimeoutLabelText))
self.noMessageTimeoutCheckBox.Bind(wx.EVT_CHECKBOX, self.onNoMessageTimeoutChange)
self.noMessageTimeoutCheckBox.SetValue(config.conf["braille"]["noMessageTimeout"])
if self.noMessageTimeoutCheckBox.GetValue():
self.messageTimeoutEdit.Disable()
if gui._isDebug():
log.debug("Loading timeout settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to set whether braille should be tethered to focus or review cursor.
tetherListText = _("Tether B&raille:")
# Translators: The value for a setting in the braille settings, to set whether braille should be tethered to focus or review cursor.
tetherChoices = [x[1] for x in braille.handler.tetherValues]
self.tetherList = sHelper.addLabeledControl(tetherListText, wx.Choice, choices=tetherChoices)
tetherChoice=braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"]
selection = (x for x,y in enumerate(braille.handler.tetherValues) if y[0]==tetherChoice).next()
try:
self.tetherList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading tether settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to read by paragraph (if it is checked, the commands to move the display by lines moves the display by paragraphs instead).
		readByParagraphText = _("Read by &paragraph")
self.readByParagraphCheckBox = sHelper.addItem(wx.CheckBox(self, label=readByParagraphText))
self.readByParagraphCheckBox.Value = config.conf["braille"]["readByParagraph"]
# Translators: The label for a setting in braille settings to enable word wrap (try to avoid spliting words at the end of the braille display).
wordWrapText = _("Avoid splitting &words when possible")
self.wordWrapCheckBox = sHelper.addItem(wx.CheckBox(self, label=wordWrapText))
self.wordWrapCheckBox.Value = config.conf["braille"]["wordWrap"]
# Translators: The label for a setting in braille settings to select how the context for the focus object should be presented on a braille display.
focusContextPresentationLabelText = _("Focus context presentation:")
self.focusContextPresentationValues = [x[0] for x in braille.focusContextPresentations]
focusContextPresentationChoices = [x[1] for x in braille.focusContextPresentations]
self.focusContextPresentationList = sHelper.addLabeledControl(focusContextPresentationLabelText, wx.Choice, choices=focusContextPresentationChoices)
try:
index=self.focusContextPresentationValues.index(config.conf["braille"]["focusContextPresentation"])
except:
index=0
self.focusContextPresentationList.SetSelection(index)
if gui._isDebug():
log.debug("Finished making settings, now at %.2f seconds from start"%(time.time() - startTime))
def onSave(self):
config.conf["braille"]["translationTable"] = self.outTableNames[self.outTableList.GetSelection()]
brailleInput.handler.table = self.inTables[self.inTableList.GetSelection()]
config.conf["braille"]["expandAtCursor"] = self.expandAtCursorCheckBox.GetValue()
config.conf["braille"]["showCursor"] = self.showCursorCheckBox.GetValue()
config.conf["braille"]["cursorBlink"] = self.cursorBlinkCheckBox.GetValue()
config.conf["braille"]["cursorBlinkRate"] = self.cursorBlinkRateEdit.GetValue()
config.conf["braille"]["cursorShapeFocus"] = self.cursorShapes[self.cursorShapeFocusList.GetSelection()]
config.conf["braille"]["cursorShapeReview"] = self.cursorShapes[self.cursorShapeReviewList.GetSelection()]
config.conf["braille"]["noMessageTimeout"] = self.noMessageTimeoutCheckBox.GetValue()
config.conf["braille"]["messageTimeout"] = self.messageTimeoutEdit.GetValue()
tetherChoice = braille.handler.tetherValues[self.tetherList.GetSelection()][0]
if tetherChoice==braille.handler.TETHER_AUTO:
config.conf["braille"]["autoTether"] = True
config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS
else:
config.conf["braille"]["autoTether"] = False
braille.handler.setTether(tetherChoice, auto=False)
config.conf["braille"]["readByParagraph"] = self.readByParagraphCheckBox.Value
config.conf["braille"]["wordWrap"] = self.wordWrapCheckBox.Value
config.conf["braille"]["focusContextPresentation"] = self.focusContextPresentationValues[self.focusContextPresentationList.GetSelection()]
def onShowCursorChange(self, evt):
self.cursorBlinkCheckBox.Enable(evt.IsChecked())
self.cursorBlinkRateEdit.Enable(evt.IsChecked() and self.cursorBlinkCheckBox.GetValue())
self.cursorShapeFocusList.Enable(evt.IsChecked())
self.cursorShapeReviewList.Enable(evt.IsChecked())
def onBlinkCursorChange(self, evt):
self.cursorBlinkRateEdit.Enable(evt.IsChecked())
def onNoMessageTimeoutChange(self, evt):
self.messageTimeoutEdit.Enable(not evt.IsChecked())
""" The Id of the category panel in the multi category settings dialog, this is set when the dialog is created
and returned to None when the dialog is destroyed. This can be used by an AppModule for NVDA to identify and announce
changes in name for the panel when categories are changed"""
NvdaSettingsCategoryPanelId = None
""" The name of the config profile currently being edited, if any.
This is set when the currently edited configuration profile is determined and returned to None when the dialog is destroyed.
This can be used by an AppModule for NVDA to identify and announce
changes in the name of the edited configuration profile when categories are changed"""
NvdaSettingsDialogActiveConfigProfile = None
class NVDASettingsDialog(MultiCategorySettingsDialog):
# Translators: This is the label for the NVDA settings dialog.
title = _("NVDA")
categoryClasses=[
GeneralSettingsPanel,
SpeechSettingsPanel,
BrailleSettingsPanel,
KeyboardSettingsPanel,
MouseSettingsPanel,
ReviewCursorPanel,
InputCompositionPanel,
ObjectPresentationPanel,
BrowseModePanel,
DocumentFormattingPanel,
]
if touchHandler.touchSupported():
categoryClasses.append(TouchInteractionPanel)
if winVersion.isUwpOcrAvailable():
categoryClasses.append(UwpOcrPanel)
def makeSettings(self, settingsSizer):
# Ensure that after the settings dialog is created the name is set correctly
super(NVDASettingsDialog, self).makeSettings(settingsSizer)
self._doOnCategoryChange()
def _doOnCategoryChange(self):
global NvdaSettingsDialogActiveConfigProfile
NvdaSettingsDialogActiveConfigProfile = config.conf.profiles[-1].name
if not NvdaSettingsDialogActiveConfigProfile or isinstance(self.currentCategory, GeneralSettingsPanel):
# Translators: The profile name for normal configuration
NvdaSettingsDialogActiveConfigProfile = _("normal configuration")
self.SetTitle(self._getDialogTitle())
def _getDialogTitle(self):
return u"{dialogTitle}: {panelTitle} ({configProfile})".format(
dialogTitle=self.title,
panelTitle=self.currentCategory.title,
configProfile=NvdaSettingsDialogActiveConfigProfile
)
def onCategoryChange(self,evt):
super(NVDASettingsDialog,self).onCategoryChange(evt)
if evt.Skipped:
return
self._doOnCategoryChange()
def Destroy(self):
global NvdaSettingsCategoryPanelId, NvdaSettingsDialogActiveConfigProfile
NvdaSettingsCategoryPanelId = None
NvdaSettingsDialogActiveConfigProfile = None
super(NVDASettingsDialog, self).Destroy()
class AddSymbolDialog(wx.Dialog):
def __init__(self, parent):
# Translators: This is the label for the add symbol dialog.
super(AddSymbolDialog,self).__init__(parent, title=_("Add Symbol"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is the label for the edit field in the add symbol dialog.
symbolText = _("Symbol:")
self.identifierTextCtrl = sHelper.addLabeledControl(symbolText, wx.TextCtrl)
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK | wx.CANCEL))
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.identifierTextCtrl.SetFocus()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
class SpeechSymbolsDialog(SettingsDialog):
def __init__(self,parent):
try:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData(speech.getCurrentLanguage())
except LookupError:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData("en")
self.symbolProcessor = symbolProcessor
# Translators: This is the label for the symbol pronunciation dialog.
# %s is replaced by the language for which symbol pronunciation is being edited.
self.title = _("Symbol Pronunciation (%s)")%languageHandler.getLanguageDescription(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).__init__(parent)
def makeSettings(self, settingsSizer):
symbols = self.symbols = [copy.copy(symbol) for symbol in self.symbolProcessor.computedSymbols.itervalues()]
self.pendingRemovals = {}
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for symbols list in symbol pronunciation dialog.
symbolsText = _("&Symbols")
self.symbolsList = sHelper.addLabeledControl(symbolsText, nvdaControls.AutoWidthColumnListCtrl, autoSizeColumnIndex=0, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
# Translators: The label for a column in symbols list used to identify a symbol.
self.symbolsList.InsertColumn(0, _("Symbol"))
self.symbolsList.InsertColumn(1, _("Replacement"))
# Translators: The label for a column in symbols list used to identify a symbol's speech level (either none, some, most, all or character).
self.symbolsList.InsertColumn(2, _("Level"))
# Translators: The label for a column in symbols list which specifies when the actual symbol will be sent to the synthesizer (preserved).
# See the "Punctuation/Symbol Pronunciation" section of the User Guide for details.
self.symbolsList.InsertColumn(3, _("Preserve"))
for symbol in symbols:
item = self.symbolsList.Append((symbol.displayName,))
self.updateListItem(item, symbol)
self.symbolsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemFocused)
# Translators: The label for the group of controls in symbol pronunciation dialog to change the pronunciation of a symbol.
changeSymbolText = _("Change selected symbol")
changeSymbolHelper = sHelper.addItem(guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=changeSymbolText), wx.VERTICAL)))
# Used to ensure that event handlers call Skip(). Not calling skip can cause focus problems for controls. More
# generally the advice on the wx documentation is: "In general, it is recommended to skip all non-command events
# to allow the default handling to take place. The command events are, however, normally not skipped as usually
# a single command such as a button click or menu item selection must only be processed by one handler."
def skipEventAndCall(handler):
def wrapWithEventSkip(event):
if event:
event.Skip()
return handler()
return wrapWithEventSkip
# Translators: The label for the edit field in symbol pronunciation dialog to change the replacement text of a symbol.
replacementText = _("&Replacement")
self.replacementEdit = changeSymbolHelper.addLabeledControl(replacementText, wx.TextCtrl)
self.replacementEdit.Bind(wx.EVT_TEXT, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change the speech level of a symbol.
levelText = _("&Level")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
levelChoices = [symbolLevelLabels[level] for level in characterProcessing.SPEECH_SYMBOL_LEVELS]
self.levelList = changeSymbolHelper.addLabeledControl(levelText, wx.Choice, choices=levelChoices)
self.levelList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change when a symbol is sent to the synthesizer.
preserveText = _("&Send actual symbol to synthesizer")
symbolPreserveLabels = characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS
preserveChoices = [symbolPreserveLabels[mode] for mode in characterProcessing.SPEECH_SYMBOL_PRESERVES]
self.preserveList = changeSymbolHelper.addLabeledControl(preserveText, wx.Choice, choices=preserveChoices)
self.preserveList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# disable the "change symbol" controls until a valid item is selected.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
bHelper = sHelper.addItem(guiHelper.ButtonHelper(orientation=wx.HORIZONTAL))
# Translators: The label for a button in the Symbol Pronunciation dialog to add a new symbol.
addButton = bHelper.addButton(self, label=_("&Add"))
# Translators: The label for a button in the Symbol Pronunciation dialog to remove a symbol.
self.removeButton = bHelper.addButton(self, label=_("Re&move"))
self.removeButton.Disable()
addButton.Bind(wx.EVT_BUTTON, self.OnAddClick)
self.removeButton.Bind(wx.EVT_BUTTON, self.OnRemoveClick)
self.editingItem = None
def postInit(self):
self.symbolsList.SetFocus()
def updateListItem(self, item, symbol):
self.symbolsList.SetStringItem(item, 1, symbol.replacement)
self.symbolsList.SetStringItem(item, 2, characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[symbol.level])
self.symbolsList.SetStringItem(item, 3, characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS[symbol.preserve])
def onSymbolEdited(self):
if self.editingItem is not None:
# Update the symbol the user was just editing.
item = self.editingItem
symbol = self.symbols[item]
symbol.replacement = self.replacementEdit.Value
symbol.level = characterProcessing.SPEECH_SYMBOL_LEVELS[self.levelList.Selection]
symbol.preserve = characterProcessing.SPEECH_SYMBOL_PRESERVES[self.preserveList.Selection]
self.updateListItem(item, symbol)
def onListItemFocused(self, evt):
# Update the editing controls to reflect the newly selected symbol.
item = evt.GetIndex()
symbol = self.symbols[item]
self.editingItem = item
# ChangeValue and Selection property used because they do not cause EVNT_CHANGED to be fired.
self.replacementEdit.ChangeValue(symbol.replacement)
self.levelList.Selection = characterProcessing.SPEECH_SYMBOL_LEVELS.index(symbol.level)
self.preserveList.Selection = characterProcessing.SPEECH_SYMBOL_PRESERVES.index(symbol.preserve)
self.removeButton.Enabled = not self.symbolProcessor.isBuiltin(symbol.identifier)
self.replacementEdit.Enable()
self.levelList.Enable()
self.preserveList.Enable()
evt.Skip()
def OnAddClick(self, evt):
with AddSymbolDialog(self) as entryDialog:
if entryDialog.ShowModal() != wx.ID_OK:
return
identifier = entryDialog.identifierTextCtrl.GetValue()
if not identifier:
return
for index, symbol in enumerate(self.symbols):
if identifier == symbol.identifier:
# Translators: An error reported in the Symbol Pronunciation dialog when adding a symbol that is already present.
gui.messageBox(_('Symbol "%s" is already present.') % identifier,
_("Error"), wx.OK | wx.ICON_ERROR)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
self.symbolsList.SetFocus()
return
addedSymbol = characterProcessing.SpeechSymbol(identifier)
try:
del self.pendingRemovals[identifier]
except KeyError:
pass
addedSymbol.displayName = identifier
addedSymbol.replacement = ""
addedSymbol.level = characterProcessing.SYMLVL_ALL
addedSymbol.preserve = characterProcessing.SYMPRES_NEVER
self.symbols.append(addedSymbol)
item = self.symbolsList.Append((addedSymbol.displayName,))
self.updateListItem(item, addedSymbol)
self.symbolsList.Select(item)
self.symbolsList.Focus(item)
self.symbolsList.SetFocus()
def OnRemoveClick(self, evt):
index = self.symbolsList.GetFirstSelected()
symbol = self.symbols[index]
self.pendingRemovals[symbol.identifier] = symbol
# Deleting from self.symbolsList focuses the next item before deleting,
# so it must be done *before* we delete from self.symbols.
self.symbolsList.DeleteItem(index)
del self.symbols[index]
index = min(index, self.symbolsList.ItemCount - 1)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index, so set editingItem.
self.editingItem = index
self.symbolsList.SetFocus()
def onOk(self, evt):
self.onSymbolEdited()
self.editingItem = None
for symbol in self.pendingRemovals.itervalues():
self.symbolProcessor.deleteSymbol(symbol)
for symbol in self.symbols:
if not symbol.replacement:
continue
self.symbolProcessor.updateSymbol(symbol)
try:
self.symbolProcessor.userSymbols.save()
except IOError as e:
log.error("Error saving user symbols info: %s" % e)
characterProcessing._localeSpeechSymbolProcessors.invalidateLocaleData(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).onOk(evt)
class InputGesturesDialog(SettingsDialog):
# Translators: The title of the Input Gestures dialog where the user can remap input gestures for commands.
title = _("Input Gestures")
def makeSettings(self, settingsSizer):
filterSizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a text field to search for gestures in the Input Gestures dialog.
filterLabel = wx.StaticText(self, label=pgettext("inputGestures", "&Filter by:"))
filter = wx.TextCtrl(self)
filterSizer.Add(filterLabel, flag=wx.ALIGN_CENTER_VERTICAL)
filterSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
filterSizer.Add(filter, proportion=1)
settingsSizer.Add(filterSizer, flag=wx.EXPAND)
settingsSizer.AddSpacer(5)
filter.Bind(wx.EVT_TEXT, self.onFilterChange, filter)
tree = self.tree = wx.TreeCtrl(self, size=wx.Size(600, 400), style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE )
self.treeRoot = tree.AddRoot("root")
tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.onTreeSelect)
settingsSizer.Add(tree, proportion=1, flag=wx.EXPAND)
self.gestures = inputCore.manager.getAllGestureMappings(obj=gui.mainFrame.prevFocus, ancestors=gui.mainFrame.prevFocusAncestors)
self.populateTree()
settingsSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_VERTICAL)
bHelper = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to add a gesture in the Input Gestures dialog.
self.addButton = bHelper.addButton(self, label=_("&Add"))
self.addButton.Bind(wx.EVT_BUTTON, self.onAdd)
self.addButton.Disable()
# Translators: The label of a button to remove a gesture in the Input Gestures dialog.
self.removeButton = bHelper.addButton(self, label=_("&Remove"))
self.removeButton.Bind(wx.EVT_BUTTON, self.onRemove)
self.removeButton.Disable()
self.pendingAdds = set()
self.pendingRemoves = set()
settingsSizer.Add(bHelper.sizer)
def postInit(self):
self.tree.SetFocus()
def populateTree(self, filter=''):
if filter:
#This regexp uses a positive lookahead (?=...) for every word in the filter, which just makes sure the word is present in the string to be tested without matching position or order.
# #5060: Escape the filter text to prevent unexpected matches and regexp errors.
# Because we're escaping, words must then be split on "\ ".
filter = re.escape(filter)
filterReg = re.compile(r'(?=.*?' + r')(?=.*?'.join(filter.split('\ ')) + r')', re.U|re.IGNORECASE)
for category in sorted(self.gestures):
treeCat = self.tree.AppendItem(self.treeRoot, category)
commands = self.gestures[category]
for command in sorted(commands):
if filter and not filterReg.match(command):
continue
treeCom = self.tree.AppendItem(treeCat, command)
commandInfo = commands[command]
self.tree.SetItemPyData(treeCom, commandInfo)
for gesture in commandInfo.gestures:
treeGes = self.tree.AppendItem(treeCom, self._formatGesture(gesture))
self.tree.SetItemPyData(treeGes, gesture)
if not self.tree.ItemHasChildren(treeCat):
self.tree.Delete(treeCat)
elif filter:
self.tree.Expand(treeCat)
def onFilterChange(self, evt):
filter=evt.GetEventObject().GetValue()
self.tree.DeleteChildren(self.treeRoot)
self.populateTree(filter)
def _formatGesture(self, identifier):
try:
source, main = inputCore.getDisplayTextForGestureIdentifier(identifier)
# Translators: Describes a gesture in the Input Gestures dialog.
# {main} is replaced with the main part of the gesture; e.g. alt+tab.
# {source} is replaced with the gesture's source; e.g. laptop keyboard.
return _("{main} ({source})").format(main=main, source=source)
except LookupError:
return identifier
def onTreeSelect(self, evt):
item = self.tree.Selection
data = self.tree.GetItemPyData(item)
isCommand = isinstance(data, inputCore.AllGesturesScriptInfo)
isGesture = isinstance(data, basestring)
self.addButton.Enabled = isCommand or isGesture
self.removeButton.Enabled = isGesture
def onAdd(self, evt):
if inputCore.manager._captureFunc:
return
treeCom = self.tree.Selection
scriptInfo = self.tree.GetItemPyData(treeCom)
if not isinstance(scriptInfo, inputCore.AllGesturesScriptInfo):
treeCom = self.tree.GetItemParent(treeCom)
scriptInfo = self.tree.GetItemPyData(treeCom)
# Translators: The prompt to enter a gesture in the Input Gestures dialog.
treeGes = self.tree.AppendItem(treeCom, _("Enter input gesture:"))
self.tree.SelectItem(treeGes)
self.tree.SetFocus()
def addGestureCaptor(gesture):
if gesture.isModifier:
return False
inputCore.manager._captureFunc = None
wx.CallAfter(self._addCaptured, treeGes, scriptInfo, gesture)
return False
inputCore.manager._captureFunc = addGestureCaptor
def _addCaptured(self, treeGes, scriptInfo, gesture):
gids = gesture.normalizedIdentifiers
if len(gids) > 1:
# Multiple choices. Present them in a pop-up menu.
menu = wx.Menu()
for gid in gids:
disp = self._formatGesture(gid)
item = menu.Append(wx.ID_ANY, disp)
self.Bind(wx.EVT_MENU,
lambda evt, gid=gid, disp=disp: self._addChoice(treeGes, scriptInfo, gid, disp),
item)
self.PopupMenu(menu)
if not self.tree.GetItemPyData(treeGes):
# No item was selected, so use the first.
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
menu.Destroy()
else:
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
def _addChoice(self, treeGes, scriptInfo, gid, disp):
entry = (gid, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just removed, just undo it.
self.pendingRemoves.remove(entry)
except KeyError:
self.pendingAdds.add(entry)
self.tree.SetItemText(treeGes, disp)
self.tree.SetItemPyData(treeGes, gid)
scriptInfo.gestures.append(gid)
self.onTreeSelect(None)
def onRemove(self, evt):
treeGes = self.tree.Selection
gesture = self.tree.GetItemPyData(treeGes)
treeCom = self.tree.GetItemParent(treeGes)
scriptInfo = self.tree.GetItemPyData(treeCom)
entry = (gesture, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just added, just undo it.
self.pendingAdds.remove(entry)
except KeyError:
self.pendingRemoves.add(entry)
self.tree.Delete(treeGes)
scriptInfo.gestures.remove(gesture)
self.tree.SetFocus()
def onOk(self, evt):
for gesture, module, className, scriptName in self.pendingRemoves:
try:
inputCore.manager.userGestureMap.remove(gesture, module, className, scriptName)
except ValueError:
# The user wants to unbind a gesture they didn't define.
inputCore.manager.userGestureMap.add(gesture, module, className, None)
for gesture, module, className, scriptName in self.pendingAdds:
try:
# The user might have unbound this gesture,
# so remove this override first.
inputCore.manager.userGestureMap.remove(gesture, module, className, None)
except ValueError:
pass
inputCore.manager.userGestureMap.add(gesture, module, className, scriptName)
if self.pendingAdds or self.pendingRemoves:
# Only save if there is something to save.
try:
inputCore.manager.userGestureMap.save()
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when saving user defined input gestures fails.
gui.messageBox(_("Error saving user defined gestures - probably read only file system."),
_("Error"), wx.OK | wx.ICON_ERROR)
super(InputGesturesDialog, self).onOk(evt)
| 1 | 22,600 | I don't think the naming of this setting conveys what it does. Maybe something like "Ignore mouse movement triggered by other applications" | nvaccess-nvda | py |
@@ -26,11 +26,12 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation
public static readonly OpenTelemetryProtocolExporterEventSource Log = new OpenTelemetryProtocolExporterEventSource();
[NonEvent]
- public void FailedToReachCollector(Exception ex)
+ public void FailedToReachCollector(Uri collectorUri, Exception ex)
{
if (Log.IsEnabled(EventLevel.Error, EventKeywords.All))
{
- this.FailedToReachCollector(ex.ToInvariantString());
+ var rawCollectorUri = collectorUri.ToString();
+ this.FailedToReachCollector(rawCollectorUri, ex.ToInvariantString());
}
}
| 1 | // <copyright file="OpenTelemetryProtocolExporterEventSource.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics.Tracing;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation
{
[EventSource(Name = "OpenTelemetry-Exporter-OpenTelemetryProtocol")]
internal class OpenTelemetryProtocolExporterEventSource : EventSource
{
public static readonly OpenTelemetryProtocolExporterEventSource Log = new OpenTelemetryProtocolExporterEventSource();
[NonEvent]
public void FailedToReachCollector(Exception ex)
{
if (Log.IsEnabled(EventLevel.Error, EventKeywords.All))
{
this.FailedToReachCollector(ex.ToInvariantString());
}
}
[NonEvent]
public void ExportMethodException(Exception ex)
{
if (Log.IsEnabled(EventLevel.Error, EventKeywords.All))
{
this.ExportMethodException(ex.ToInvariantString());
}
}
[Event(2, Message = "Exporter failed send data to collector. Data will not be sent. Exception: {0}", Level = EventLevel.Error)]
public void FailedToReachCollector(string ex)
{
this.WriteEvent(2, ex);
}
[Event(3, Message = "Could not translate activity from class '{0}' and method '{1}', span will not be recorded.", Level = EventLevel.Informational)]
public void CouldNotTranslateActivity(string className, string methodName)
{
this.WriteEvent(3, className, methodName);
}
[Event(4, Message = "Unknown error in export method: {0}", Level = EventLevel.Error)]
public void ExportMethodException(string ex)
{
this.WriteEvent(4, ex);
}
[Event(5, Message = "Could not translate metric from class '{0}' and method '{1}', metric will not be recorded.", Level = EventLevel.Informational)]
public void CouldNotTranslateMetric(string className, string methodName)
{
this.WriteEvent(5, className, methodName);
}
[Event(8, Message = "Unsupported value for protocol '{0}' is configured, default protocol 'grpc' will be used.", Level = EventLevel.Warning)]
public void UnsupportedProtocol(string protocol)
{
this.WriteEvent(8, protocol);
}
[Event(9, Message = "Could not translate LogRecord from class '{0}' and method '{1}', log will not be exported.", Level = EventLevel.Informational)]
public void CouldNotTranslateLogRecord(string className, string methodName)
{
this.WriteEvent(9, className, methodName);
}
}
}
 | 1 | 22,784 | Didn't go with backwards compatibility since it's still in beta | open-telemetry-opentelemetry-dotnet | .cs |
@@ -82,6 +82,9 @@ python::tuple fragmentMolHelper3(const RDKit::ROMol& mol, python::object ob,
std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>> tres;
std::unique_ptr<std::vector<unsigned int>> v =
pythonObjectToVect<unsigned int>(ob);
+ if (!v) {
+ throw_value_error("invalid value for bondsToCut");
+ }
bool ok = RDKit::MMPA::fragmentMol(mol, tres, *v, minCuts, maxCuts);
python::list pyres;
if (ok) { | 1 | //
// Copyright (C) 2015 Greg Landrum
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define PY_ARRAY_UNIQUE_SYMBOL rdmmpa_array_API
#include <boost/python.hpp>
#include <GraphMol/ROMol.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <RDBoost/Wrap.h>
#include <GraphMol/MMPA/MMPA.h>
namespace python = boost::python;
namespace {
python::tuple fragmentMolHelper(const RDKit::ROMol& mol, unsigned int maxCuts,
unsigned int maxCutBonds,
const std::string& pattern,
bool resultsAsMols) {
std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>> tres;
bool ok = RDKit::MMPA::fragmentMol(mol, tres, maxCuts, maxCutBonds, pattern);
python::list pyres;
if (ok) {
for (std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>>::
const_iterator pr = tres.begin();
pr != tres.end(); ++pr) {
python::list lres;
if (resultsAsMols) {
lres.append(pr->first);
lres.append(pr->second);
} else {
if (pr->first) {
lres.append(RDKit::MolToSmiles(*(pr->first), true));
} else {
lres.append("");
}
lres.append(RDKit::MolToSmiles(*(pr->second), true));
}
pyres.append(python::tuple(lres));
}
}
return python::tuple(pyres);
}
python::tuple fragmentMolHelper2(const RDKit::ROMol& mol, unsigned int minCuts,
unsigned int maxCuts, unsigned int maxCutBonds,
const std::string& pattern,
bool resultsAsMols) {
std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>> tres;
bool ok = RDKit::MMPA::fragmentMol(mol, tres, minCuts, maxCuts, maxCutBonds,
pattern);
python::list pyres;
if (ok) {
for (std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>>::
const_iterator pr = tres.begin();
pr != tres.end(); ++pr) {
python::list lres;
if (resultsAsMols) {
lres.append(pr->first);
lres.append(pr->second);
} else {
if (pr->first) {
lres.append(RDKit::MolToSmiles(*(pr->first), true));
} else {
lres.append("");
}
lres.append(RDKit::MolToSmiles(*(pr->second), true));
}
pyres.append(python::tuple(lres));
}
}
return python::tuple(pyres);
}
python::tuple fragmentMolHelper3(const RDKit::ROMol& mol, python::object ob,
unsigned int minCuts, unsigned int maxCuts,
bool resultsAsMols) {
std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>> tres;
std::unique_ptr<std::vector<unsigned int>> v =
pythonObjectToVect<unsigned int>(ob);
bool ok = RDKit::MMPA::fragmentMol(mol, tres, *v, minCuts, maxCuts);
python::list pyres;
if (ok) {
for (std::vector<std::pair<RDKit::ROMOL_SPTR, RDKit::ROMOL_SPTR>>::
const_iterator pr = tres.begin();
pr != tres.end(); ++pr) {
python::list lres;
if (resultsAsMols) {
lres.append(pr->first);
lres.append(pr->second);
} else {
if (pr->first) {
lres.append(RDKit::MolToSmiles(*(pr->first), true));
} else {
lres.append("");
}
lres.append(RDKit::MolToSmiles(*(pr->second), true));
}
pyres.append(python::tuple(lres));
}
}
return python::tuple(pyres);
}
} // namespace
BOOST_PYTHON_MODULE(rdMMPA) {
python::scope().attr("__doc__") =
"Module containing a C++ implementation of code for doing MMPA";
std::string docString =
"Does the fragmentation necessary for an MMPA analysis";
python::def("FragmentMol", fragmentMolHelper,
(python::arg("mol"), python::arg("maxCuts") = 3,
python::arg("maxCutBonds") = 20,
python::arg("pattern") = "[#6+0;!$(*=,#[!#6])]!@!=!#[*]",
python::arg("resultsAsMols") = true),
docString.c_str());
python::def("FragmentMol", fragmentMolHelper2,
(python::arg("mol"), python::arg("minCuts"),
python::arg("maxCuts"), python::arg("maxCutBonds"),
python::arg("pattern") = "[#6+0;!$(*=,#[!#6])]!@!=!#[*]",
python::arg("resultsAsMols") = true),
docString.c_str());
python::def("FragmentMol", fragmentMolHelper3,
(python::arg("mol"), python::arg("bondsToCut"),
python::arg("minCuts") = 1, python::arg("maxCuts") = 3,
python::arg("resultsAsMols") = true),
docString.c_str());
}
| 1 | 23,819 | Perhaps bondsToCut must be None or non empty. | rdkit-rdkit | cpp |
@@ -79,6 +79,7 @@ function Sparkline( {
return (
<div className="googlesitekit-analytics-sparkline-chart-wrap">
<GoogleChart
+ chartType="line"
data={ data }
options={ chartOptions }
// eslint-disable-next-line sitekit/camelcase-acronyms | 1 | /**
* Sparkline component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
/**
* WordPress dependencies
*/
import { withInstanceId } from '@wordpress/compose';
/**
* Internal dependencies
*/
import GoogleChart from './GoogleChart';
function Sparkline( {
data,
change,
// eslint-disable-next-line sitekit/camelcase-acronyms
instanceId,
invertChangeColor,
loadSmall,
loadCompressed,
loadHeight,
loadText,
} ) {
if ( ! data ) {
return 'loading...';
}
const positiveColor = ! invertChangeColor ? 'green' : 'red';
const negativeColor = ! invertChangeColor ? 'red' : 'green';
const chartOptions = {
title: '',
backgroundColor: 'transparent',
curveType: 'line',
width: '100%',
height: '50',
enableInteractivity: false,
chartArea: {
height: '100%',
width: '100%',
},
legend: { position: 'none' },
axisFontSize: 0,
hAxis: {
baselineColor: 'none',
ticks: [],
},
vAxis: {
baselineColor: 'none',
ticks: [],
},
axes: [],
colors: [
0 <= ( parseFloat( change ) || 0 ) ? positiveColor : negativeColor,
],
};
return (
<div className="googlesitekit-analytics-sparkline-chart-wrap">
<GoogleChart
data={ data }
options={ chartOptions }
// eslint-disable-next-line sitekit/camelcase-acronyms
id={ `googlesitekit-sparkline-${ instanceId }` }
loadSmall={ loadSmall }
loadCompressed={ loadCompressed }
loadHeight={ loadHeight }
loadText={ loadText }
/>
</div>
);
}
Sparkline.propTypes = {
// eslint-disable-next-line sitekit/camelcase-acronyms
instanceId: PropTypes.number.isRequired,
invertChangeColor: PropTypes.bool,
loadSmall: PropTypes.bool,
loadCompressed: PropTypes.bool,
loadHeight: PropTypes.number,
loadText: PropTypes.bool,
};
Sparkline.defaultProps = {
invertChangeColor: false,
loadSmall: true,
loadCompressed: true,
loadHeight: 46,
loadText: false,
};
export default withInstanceId( Sparkline );
| 1 | 35,430 | See above, we could avoid adding that (same applies below). | google-site-kit-wp | js |
@@ -35,12 +35,9 @@ public class SetNetworkConnection extends WebDriverHandler<Number> implements Js
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
- Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
- Map<String, Object> typeMap = parameters.get("type");
-
- type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
+ Map<String, Object> parameters = (Map<String, Object>)allParameters.get("parameters");
+ Long bitmask = (Long) parameters.get("type");
+ type = new ConnectionType(bitmask.intValue());
}
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.server.handler.mobile;
import java.util.Map;
import org.openqa.selenium.mobile.NetworkConnection.ConnectionType;
import org.openqa.selenium.remote.server.JsonParametersAware;
import org.openqa.selenium.remote.server.Session;
import org.openqa.selenium.remote.server.handler.WebDriverHandler;
import org.openqa.selenium.remote.server.handler.html5.Utils;
public class SetNetworkConnection extends WebDriverHandler<Number> implements JsonParametersAware {
private volatile ConnectionType type;
public SetNetworkConnection(Session session) {
super(session);
}
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
Map<String, Object> typeMap = parameters.get("type");
type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
}
@Override
public Number call() throws Exception {
return Integer.parseInt(Utils.getNetworkConnection(getUnwrappedDriver()).setNetworkConnection(type).toString());
}
@Override
public String toString() {
return String.format("[set network connection : %s]", type.toString());
}
}
| 1 | 13,952 | should use Number instead of Long | SeleniumHQ-selenium | rb |
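A minimal illustrative sketch of the reviewer's suggestion above (hypothetical, not part of the actual patch): casting the decoded JSON parameter to Number rather than Long avoids assuming which boxed integer type the JSON layer produced.

    // Sketch only: accept any numeric boxing (Integer, Long, ...) from the JSON parameters.
    Number bitmask = (Number) parameters.get("type");
    type = new ConnectionType(bitmask.intValue());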
@@ -1,6 +1,7 @@
'use strict';
var assert = require('assert');
+const expect = require('chai').expect;
var co = require('co');
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase; | 1 | 'use strict';
var assert = require('assert');
var co = require('co');
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase;
function processResult() {}
describe('Examples', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* @ignore
*/
it('first three examples', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise1 =
// Start Example 1
db
.collection('inventory')
.insertOne({
item: 'canvas',
qty: 100,
tags: ['cotton'],
size: { h: 28, w: 35.5, uom: 'cm' }
})
.then(function(result) {
processResult(result);
})
// End Example 1
.then(() => {
return db.collection('inventory').count({});
});
assert.equal(1, yield promise1);
// Start Example 2
var cursor = db.collection('inventory').find({
item: 'canvas'
});
// End Example 2
assert.equal(1, yield cursor.count());
var promise3 =
// Start Example 3
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
tags: ['blank', 'red'],
size: { h: 14, w: 21, uom: 'cm' }
},
{
item: 'mat',
qty: 85,
tags: ['gray'],
size: { h: 27.9, w: 35.5, uom: 'cm' }
},
{
item: 'mousepad',
qty: 25,
tags: ['gel', 'blue'],
size: { h: 19, w: 22.85, uom: 'cm' }
}
])
.then(function(result) {
processResult(result);
})
// End Example 3
.then(() => {
return db.collection('inventory').count({});
});
assert.equal(4, yield promise3);
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query top level fields', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 6
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'A'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 6
yield promise;
assert.equal(5, yield db.collection('inventory').count());
/* eslint-disable */
// Start Example 7
var cursor = db.collection('inventory').find({});
// End Example 7
assert.equal(5, yield cursor.count());
// Start Example 9
var cursor = db.collection('inventory').find({ status: 'D' });
// End Example 9
assert.equal(2, yield cursor.count());
// Start Example 10
var cursor = db.collection('inventory').find({
status: { $in: ['A', 'D'] }
});
// End Example 10
assert.equal(5, yield cursor.count());
// Start Example 11
var cursor = db.collection('inventory').find({
status: 'A',
qty: { $lt: 30 }
});
// End Example 11
assert.equal(1, yield cursor.count());
// Start Example 12
var cursor = db.collection('inventory').find({
$or: [{ status: 'A' }, { qty: { $lt: 30 } }]
});
// End Example 12
assert.equal(3, yield cursor.count());
// Start Example 13
var cursor = db.collection('inventory').find({
status: 'A',
$or: [{ qty: { $lt: 30 } }, { item: { $regex: '^p' } }]
});
// End Example 13
assert.equal(2, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query embedded documents', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 14
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'A'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 14
yield promise;
// Start Example 15
var cursor = db.collection('inventory').find({
size: { h: 14, w: 21, uom: 'cm' }
});
// End Example 15
assert.equal(1, yield cursor.count());
/* eslint-disable */
// Start Example 16
var cursor = db.collection('inventory').find({
size: { w: 21, h: 14, uom: 'cm' }
});
// End Example 16
assert.equal(0, yield cursor.count());
// Start Example 17
var cursor = db.collection('inventory').find({
'size.uom': 'in'
});
// End Example 17
assert.equal(2, yield cursor.count());
// Start Example 18
var cursor = db.collection('inventory').find({
'size.h': { $lt: 15 }
});
// End Example 18
assert.equal(4, yield cursor.count());
// Start Example 19
var cursor = db.collection('inventory').find({
'size.h': { $lt: 15 },
'size.uom': 'in',
status: 'D'
});
// End Example 19
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query arrays', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 20
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
tags: ['blank', 'red'],
dim_cm: [14, 21]
},
{
item: 'notebook',
qty: 50,
tags: ['red', 'blank'],
dim_cm: [14, 21]
},
{
item: 'paper',
qty: 100,
tags: ['red', 'blank', 'plain'],
dim_cm: [14, 21]
},
{
item: 'planner',
qty: 75,
tags: ['blank', 'red'],
dim_cm: [22.85, 30]
},
{
item: 'postcard',
qty: 45,
tags: ['blue'],
dim_cm: [10, 15.25]
}
])
.then(function(result) {
processResult(result);
});
// End Example 20
yield promise;
/* eslint-disable */
// Start Example 21
var cursor = db.collection('inventory').find({
tags: ['red', 'blank']
});
// End Example 21
assert.equal(1, yield cursor.count());
// Start Example 22
var cursor = db.collection('inventory').find({
tags: { $all: ['red', 'blank'] }
});
// End Example 22
assert.equal(4, yield cursor.count());
// Start Example 23
var cursor = db.collection('inventory').find({
tags: 'red'
});
// End Example 23
assert.equal(4, yield cursor.count());
// Start Example 24
var cursor = db.collection('inventory').find({
dim_cm: { $gt: 25 }
});
// End Example 24
assert.equal(1, yield cursor.count());
// Start Example 25
var cursor = db.collection('inventory').find({
dim_cm: { $gt: 15, $lt: 20 }
});
// End Example 25
assert.equal(4, yield cursor.count());
// Start Example 26
var cursor = db.collection('inventory').find({
dim_cm: { $elemMatch: { $gt: 22, $lt: 30 } }
});
// End Example 26
assert.equal(1, yield cursor.count());
// Start Example 27
var cursor = db.collection('inventory').find({
'dim_cm.1': { $gt: 25 }
});
// End Example 27
assert.equal(1, yield cursor.count());
// Start Example 28
var cursor = db.collection('inventory').find({
tags: { $size: 3 }
});
// End Example 28
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query array of documents', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 29
db
.collection('inventory')
.insertMany([
{
item: 'journal',
instock: [{ warehouse: 'A', qty: 5 }, { warehouse: 'C', qty: 15 }]
},
{
item: 'notebook',
instock: [{ warehouse: 'C', qty: 5 }]
},
{
item: 'paper',
instock: [{ warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 15 }]
},
{
item: 'planner',
instock: [{ warehouse: 'A', qty: 40 }, { warehouse: 'B', qty: 5 }]
},
{
item: 'postcard',
instock: [{ warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }]
}
])
.then(function(result) {
processResult(result);
});
// End Example 29
yield promise;
// Start Example 30
var cursor = db.collection('inventory').find({
instock: { warehouse: 'A', qty: 5 }
});
// End Example 30
assert.equal(1, yield cursor.count());
/* eslint-disable */
// Start Example 31
var cursor = db.collection('inventory').find({
instock: { qty: 5, warehouse: 'A' }
});
// End Example 31
assert.equal(0, yield cursor.count());
// Start Example 32
var cursor = db.collection('inventory').find({
'instock.0.qty': { $lte: 20 }
});
// End Example 32
assert.equal(3, yield cursor.count());
// Start Example 33
var cursor = db.collection('inventory').find({
'instock.qty': { $lte: 20 }
});
// End Example 33
assert.equal(5, yield cursor.count());
// Start Example 34
var cursor = db.collection('inventory').find({
instock: { $elemMatch: { qty: 5, warehouse: 'A' } }
});
// End Example 34
assert.equal(1, yield cursor.count());
// Start Example 35
var cursor = db.collection('inventory').find({
instock: { $elemMatch: { qty: { $gt: 10, $lte: 20 } } }
});
// End Example 35
assert.equal(3, yield cursor.count());
// Start Example 36
var cursor = db.collection('inventory').find({
'instock.qty': { $gt: 10, $lte: 20 }
});
// End Example 36
assert.equal(4, yield cursor.count());
// Start Example 37
var cursor = db.collection('inventory').find({
'instock.qty': 5,
'instock.warehouse': 'A'
});
// End Example 37
assert.equal(2, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query null', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 38
db
.collection('inventory')
.insertMany([{ _id: 1, item: null }, { _id: 2 }])
.then(function(result) {
processResult(result);
});
// End Example 38
yield promise;
// Start Example 39
var cursor = db.collection('inventory').find({
item: null
});
// End Example 39
assert.equal(2, yield cursor.count());
/* eslint-disable */
// Start Example 40
var cursor = db.collection('inventory').find({
item: { $type: 10 }
});
// End Example 40
assert.equal(1, yield cursor.count());
// Start Example 41
var cursor = db.collection('inventory').find({
item: { $exists: false }
});
// End Example 41
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('projection', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 42
db
.collection('inventory')
.insertMany([
{
item: 'journal',
status: 'A',
size: { h: 14, w: 21, uom: 'cm' },
instock: [{ warehouse: 'A', qty: 5 }]
},
{
item: 'notebook',
status: 'A',
size: { h: 8.5, w: 11, uom: 'in' },
instock: [{ warehouse: 'C', qty: 5 }]
},
{
item: 'paper',
status: 'D',
size: { h: 8.5, w: 11, uom: 'in' },
instock: [{ warehouse: 'A', qty: 60 }]
},
{
item: 'planner',
status: 'D',
size: { h: 22.85, w: 30, uom: 'cm' },
instock: [{ warehouse: 'A', qty: 40 }]
},
{
item: 'postcard',
status: 'A',
size: { h: 10, w: 15.25, uom: 'cm' },
instock: [{ warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }]
}
])
.then(function(result) {
processResult(result);
});
// End Example 42
yield promise;
// Start Example 43
var cursor = db.collection('inventory').find({
status: 'A'
});
// End Example 43
assert.equal(3, yield cursor.count());
/* eslint-disable */
// Start Example 44
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1 });
// End Example 44
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 45
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, _id: 0 });
// End Example 45
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal(undefined, doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 46
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ status: 0, instock: 0 });
// End Example 46
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.equal(undefined, doc.status);
assert.ok(doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 47
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, 'size.uom': 1 });
// End Example 47
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.ok(doc.size);
assert.equal(undefined, doc.instock);
var size = doc.size;
assert.ok(size.uom);
assert.equal(undefined, size.h);
assert.equal(undefined, size.w);
});
// Start Example 48
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ 'size.uom': 0 });
// End Example 48
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.ok(doc.size);
assert.ok(doc.instock);
var size = doc.size;
assert.equal(undefined, size.uom);
assert.ok(size.h);
assert.ok(size.w);
});
// Start Example 49
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, 'instock.qty': 1 });
// End Example 49
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
doc.instock.forEach(function(subdoc) {
assert.equal(undefined, subdoc.warehouse);
assert.ok(subdoc.qty);
});
});
// Start Example 50
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, instock: { $slice: -1 } });
// End Example 50
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.ok(doc.instock);
assert.equal(1, doc.instock.length);
});
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('update and replace', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 51
db
.collection('inventory')
.insertMany([
{
item: 'canvas',
qty: 100,
size: { h: 28, w: 35.5, uom: 'cm' },
status: 'A'
},
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'mat',
qty: 85,
size: { h: 27.9, w: 35.5, uom: 'cm' },
status: 'A'
},
{
item: 'mousepad',
qty: 25,
size: { h: 19, w: 22.85, uom: 'cm' },
status: 'P'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'P'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
},
{
item: 'sketchbook',
qty: 80,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'sketch pad',
qty: 95,
size: { h: 22.85, w: 30.5, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 51
yield promise;
promise =
// Start Example 52
db
.collection('inventory')
.updateOne(
{ item: 'paper' },
{
$set: { 'size.uom': 'cm', status: 'P' },
$currentDate: { lastModified: true }
}
)
.then(function(result) {
processResult(result);
// process result
});
// End Example 52
yield promise;
var cursor = db.collection('inventory').find({
item: 'paper'
});
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal('cm', doc.size.uom);
assert.equal('P', doc.status);
assert.ok(doc.lastModified);
});
promise =
// Start Example 53
db
.collection('inventory')
.updateMany(
{ qty: { $lt: 50 } },
{
$set: { 'size.uom': 'in', status: 'P' },
$currentDate: { lastModified: true }
}
)
.then(function(result) {
processResult(result);
});
// End Example 53
yield promise;
cursor = db.collection('inventory').find({
qty: { $lt: 50 }
});
docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal('in', doc.size.uom);
assert.equal('P', doc.status);
assert.ok(doc.lastModified);
});
promise =
// Start Example 54
db
.collection('inventory')
.replaceOne(
{ item: 'paper' },
{
$set: {
item: 'paper',
instock: [{ warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 }]
},
$unset: {
qty: '',
size: '',
status: '',
lastModified: ''
}
}
)
.then(function(result) {
processResult(result);
});
// End Example 54
yield promise;
cursor = db
.collection('inventory')
.find({
item: 'paper'
})
.project({ _id: 0 });
docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal(2, Object.keys(doc).length);
assert.ok(doc.item);
assert.ok(doc.instock);
assert.equal(2, doc.instock.length);
});
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('delete', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 55
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'P'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 55
yield promise;
var cursor = db.collection('inventory').find({});
assert.equal(5, yield cursor.count());
promise =
// Start Example 57
db
.collection('inventory')
.deleteMany({
status: 'A'
})
.then(function(result) {
processResult(result);
});
// End Example 57
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(3, yield cursor.count());
promise =
// Start Example 58
db
.collection('inventory')
.deleteOne({
status: 'D'
})
.then(function(result) {
processResult(result);
});
// End Example 58
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(2, yield cursor.count());
promise =
// Start Example 56
db
.collection('inventory')
.deleteMany({})
.then(function(result) {
processResult(result);
});
// End Example 56
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(0, yield cursor.count());
client.close();
done();
});
});
}
});
it('supports array filters when updating', {
metadata: {
requires: {
mongodb: '>=3.6.x',
topology: ['single']
}
},
test: function(done) {
const configuration = this.configuration;
const MongoClient = configuration.newClient();
MongoClient.connect(function(err, client) {
const db = client.db(configuration.db);
const collection = db.collection('arrayFilterUpdateExample');
// 3. Exploiting the power of arrays
collection.updateOne(
{ _id: 1 },
{ $set: { 'a.$[i].b': 2 } },
{ arrayFilters: [{ 'i.b': 0 }] },
function updated(err, result) {
assert.equal(err, null);
assert.equal(typeof result, 'object');
client.close();
done();
}
);
});
}
});
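  // Illustrative note (not from the original test file): with a hypothetical seed
  // document { _id: 1, a: [{ b: 0 }, { b: 1 }] }, the filtered positional update above
  // ($set on 'a.$[i].b' with arrayFilters [{ 'i.b': 0 }]) only rewrites array elements
  // whose b is 0, leaving { _id: 1, a: [{ b: 2 }, { b: 1 }] }. A follow-up check could
  // look roughly like this (sketch only; `collection` is the collection used above):
  //
  //   collection.findOne({ _id: 1 }, function(err, doc) {
  //     assert.equal(doc.a[0].b, 2);
  //     assert.equal(doc.a[1].b, 1);
  //   });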
});
| 1 | 14,158 | Should the rest of the file be updated to use `expect` or should this test use the same format as the rest of the tests? | mongodb-node-mongodb-native | js |
@@ -227,6 +227,12 @@ func (a *AWSActuator) Refresh() error {
}
logger.Debug("Found hosted zone")
a.zoneID = &zoneID
+
+ // Update dnsZone status now that we have the zoneID
+ if err := a.ModifyStatus(); err != nil {
+ a.logger.WithError(err).Error("failed to update status after refresh")
+ return err
+ }
}
if a.zoneID == nil { | 1 | package dnszone
import (
"errors"
"fmt"
"strings"
log "github.com/sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/aws/aws-sdk-go/service/route53"
corev1 "k8s.io/api/core/v1"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
awsclient "github.com/openshift/hive/pkg/awsclient"
"github.com/openshift/hive/pkg/constants"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
)
const (
hiveDNSZoneAWSTag = "hive.openshift.io/dnszone"
)
// Ensure AWSActuator implements the Actuator interface. This will fail at compile time when false.
var _ Actuator = &AWSActuator{}
// AWSActuator manages getting the desired state, getting the current state and reconciling the two.
type AWSActuator struct {
// logger is the logger used for this controller
logger log.FieldLogger
// awsClient is a utility for making it easy for controllers to interface with AWS
awsClient awsclient.Client
// zoneID is the ID of the hosted zone in route53
zoneID *string
// currentTags are the list of tags associated with the currentHostedZone
currentHostedZoneTags []*route53.Tag
// The DNSZone that represents the desired state.
dnsZone *hivev1.DNSZone
}
type awsClientBuilderType func(secret *corev1.Secret, region string) (awsclient.Client, error)
// NewAWSActuator creates a new AWSActuator object. A new AWSActuator is expected to be created for each controller sync.
func NewAWSActuator(
logger log.FieldLogger,
secret *corev1.Secret,
dnsZone *hivev1.DNSZone,
awsClientBuilder awsClientBuilderType,
) (*AWSActuator, error) {
region := dnsZone.Spec.AWS.Region
if region == "" {
region = constants.AWSRoute53Region
}
awsClient, err := awsClientBuilder(secret, region)
if err != nil {
logger.WithError(err).Error("Error creating AWSClient")
return nil, err
}
awsActuator := &AWSActuator{
logger: logger,
awsClient: awsClient,
dnsZone: dnsZone,
}
return awsActuator, nil
}
// UpdateMetadata ensures that the Route53 hosted zone metadata is current with the DNSZone
func (a *AWSActuator) UpdateMetadata() error {
if a.zoneID == nil {
return errors.New("zoneID is unpopulated")
}
// For now, tags are the only things we can sync with existing zones.
return a.syncTags()
}
// syncTags determines if there are changes that need to happen to match tags in the spec
func (a *AWSActuator) syncTags() error {
existingTags := a.currentHostedZoneTags
expected := a.expectedTags()
toAdd := []*route53.Tag{}
toDelete := make([]*route53.Tag, len(existingTags))
// Initially add all existing tags to the toDelete array
// As they're found in the expected array, remove them from
// the toDelete array
copy(toDelete, existingTags)
logger := a.logger.WithField("id", a.zoneID)
logger.WithField("current", tagsString(existingTags)).WithField("expected", tagsString(expected)).Debug("syncing tags")
for _, tag := range expected {
found := false
for i, actualTag := range toDelete {
if tagEquals(tag, actualTag) {
found = true
toDelete = append(toDelete[:i], toDelete[i+1:]...)
logger.WithField("tag", tagString(tag)).Debug("tag already exists, will not be added")
break
}
}
if !found {
logger.WithField("tag", tagString(tag)).Debug("tag will be added")
toAdd = append(toAdd, tag)
}
}
if len(toDelete) == 0 && len(toAdd) == 0 {
logger.Debug("tags are in sync, no action required")
return nil
}
keysToDelete := make([]*string, 0, len(toDelete))
for _, tag := range toDelete {
logger.WithField("tag", tagString(tag)).Debug("tag will be deleted")
keysToDelete = append(keysToDelete, tag.Key)
}
// Only 10 tags can be added/removed at a time. Iterate until all tags are added/removed
index := 0
for len(toAdd) > index || len(keysToDelete) > index {
toAddSegment := []*route53.Tag{}
keysToDeleteSegment := []*string{}
if len(toAdd) > index {
toAddSegment = toAdd[index:min(index+10, len(toAdd))]
}
if len(keysToDelete) > index {
keysToDeleteSegment = keysToDelete[index:min(index+10, len(keysToDelete))]
}
if len(toAddSegment) == 0 {
toAddSegment = nil
}
if len(keysToDeleteSegment) == 0 {
keysToDeleteSegment = nil
}
logger.Debugf("Adding %d tags, deleting %d tags", len(toAddSegment), len(keysToDeleteSegment))
_, err := a.awsClient.ChangeTagsForResource(&route53.ChangeTagsForResourceInput{
AddTags: toAddSegment,
RemoveTagKeys: keysToDeleteSegment,
ResourceId: a.zoneID,
ResourceType: aws.String("hostedzone"),
})
if err != nil {
logger.WithError(err).Error("Cannot update tags for hosted zone")
return err
}
index += 10
}
return nil
}
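// Illustrative note (not part of the original source): if the zone currently carries
// the tags {Name=foo, Env=dev} and expectedTags() returns {Name=foo, Team=hive}, the
// first loop in syncTags leaves Name untouched, puts Team=hive into toAdd and Env's
// key into keysToDelete, and both are sent in one ChangeTagsForResource call; once
// more than ten changes accumulate, the batching loop splits them ten at a time.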
// ModifyStatus updates the DnsZone's status with AWS specific information.
func (a *AWSActuator) ModifyStatus() error {
if a.zoneID == nil {
return errors.New("zoneID is unpopulated")
}
a.dnsZone.Status.AWS = &hivev1.AWSDNSZoneStatus{
ZoneID: a.zoneID,
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// Refresh gets the AWS object for the zone.
// If a zone cannot be found or no longer exists, actuator.zoneID remains unset.
func (a *AWSActuator) Refresh() error {
var zoneIDs []string
var err error
if a.dnsZone.Status.AWS != nil && a.dnsZone.Status.AWS.ZoneID != nil {
a.logger.Debug("Zone ID is set in status, will retrieve by ID")
zoneIDs = []string{*a.dnsZone.Status.AWS.ZoneID}
}
if len(zoneIDs) == 0 {
a.logger.Debug("Zone ID is not set in status, looking up by tag")
zoneIDs, err = a.findZoneIDsByTag()
if err != nil {
a.logger.WithError(err).Error("Failed to lookup zone by tag")
return err
}
}
if len(zoneIDs) == 0 {
a.logger.Debug("No matching existing zone found")
return nil
}
// Fetch the hosted zone
a.zoneID = nil
for _, zoneID := range zoneIDs {
logger := a.logger.WithField("id", zoneID)
logger.Debug("Fetching hosted zone by ID")
resp, err := a.awsClient.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zoneID)})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == route53.ErrCodeNoSuchHostedZone {
logger.Debug("Zone no longer exists")
continue
}
}
logger.WithError(err).Error("Cannot get hosted zone")
return err
}
if name := *resp.HostedZone.Name; name != controllerutils.Dotted(a.dnsZone.Spec.Zone) {
logger.WithField("zoneName", name).Debug("Zone name does not match expected name")
continue
}
logger.Debug("Found hosted zone")
a.zoneID = &zoneID
}
if a.zoneID == nil {
a.logger.Debug("No existing zone found")
return nil
}
logger := a.logger.WithField("id", a.zoneID)
logger.Debug("Fetching hosted zone tags")
tags, err := a.existingTags(a.zoneID)
if err != nil {
logger.WithError(err).Error("Cannot get hosted zone tags")
return err
}
a.currentHostedZoneTags = tags
return nil
}
func (a *AWSActuator) findZoneIDsByTag() ([]string, error) {
var ids []string
tagFilter := &resourcegroupstaggingapi.TagFilter{
Key: aws.String(hiveDNSZoneAWSTag),
Values: []*string{aws.String(fmt.Sprintf("%s/%s", a.dnsZone.Namespace, a.dnsZone.Name))},
}
filterString := fmt.Sprintf("%s=%s", aws.StringValue(tagFilter.Key), aws.StringValue(tagFilter.Values[0]))
a.logger.WithField("filter", filterString).Debug("Searching for zone by tag")
id := ""
err := a.awsClient.GetResourcesPages(&resourcegroupstaggingapi.GetResourcesInput{
ResourceTypeFilters: []*string{aws.String("route53:hostedzone")},
TagFilters: []*resourcegroupstaggingapi.TagFilter{tagFilter},
}, func(resp *resourcegroupstaggingapi.GetResourcesOutput, lastPage bool) bool {
for _, zone := range resp.ResourceTagMappingList {
logger := a.logger.WithField("arn", aws.StringValue(zone.ResourceARN))
logger.Debug("Processing search result")
zoneARN, err := arn.Parse(aws.StringValue(zone.ResourceARN))
if err != nil {
logger.WithError(err).Error("Failed to parse hostedzone ARN")
continue
}
elems := strings.Split(zoneARN.Resource, "/")
if len(elems) != 2 || elems[0] != "hostedzone" {
logger.Error("Unexpected hostedzone ARN")
continue
}
id = elems[1]
logger.WithField("id", id).Debug("Found hosted zone")
ids = append(ids, id)
}
return true
})
return ids, err
}
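// Illustrative note (not part of the original source): a matching resource ARN such as
// "arn:aws:route53:::hostedzone/Z1D633PJN98FT9" parses to a Resource of
// "hostedzone/Z1D633PJN98FT9", which the split in findZoneIDsByTag reduces to the bare
// zone ID "Z1D633PJN98FT9" that gets appended to ids.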
func (a *AWSActuator) expectedTags() []*route53.Tag {
tags := []*route53.Tag{
{
Key: aws.String(hiveDNSZoneAWSTag),
Value: aws.String(fmt.Sprintf("%s/%s", a.dnsZone.Namespace, a.dnsZone.Name)),
},
}
if a.dnsZone.Spec.AWS != nil {
for _, tag := range a.dnsZone.Spec.AWS.AdditionalTags {
tags = append(tags, &route53.Tag{
Key: aws.String(tag.Key),
Value: aws.String(tag.Value),
})
}
}
a.logger.WithField("tags", tagsString(tags)).Debug("Expected tags")
return tags
}
func (a *AWSActuator) existingTags(zoneID *string) ([]*route53.Tag, error) {
logger := a.logger.WithField("id", aws.StringValue(zoneID))
logger.Debug("listing existing tags for zone")
resp, err := a.awsClient.ListTagsForResource(&route53.ListTagsForResourceInput{
ResourceId: zoneID,
ResourceType: aws.String("hostedzone"),
})
if err != nil {
logger.WithError(err).Error("cannot list tags for zone")
return nil, err
}
logger.WithField("tags", tagsString(resp.ResourceTagSet.Tags)).Debug("retrieved zone tags")
return resp.ResourceTagSet.Tags, nil
}
// Create makes an AWS Route53 hosted zone given the DNSZone object.
func (a *AWSActuator) Create() error {
logger := a.logger.WithField("zone", a.dnsZone.Spec.Zone)
logger.Info("Creating route53 hostedzone")
var hostedZone *route53.HostedZone
resp, err := a.awsClient.CreateHostedZone(&route53.CreateHostedZoneInput{
Name: aws.String(a.dnsZone.Spec.Zone),
// We use the UID of the HostedZone resource as the caller reference so that if
// we fail to update the status of the HostedZone with the ID of the recently
// created zone, we don't attempt to recreate it. Same if communication fails on
// the response from AWS.
CallerReference: aws.String(string(a.dnsZone.UID)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == route53.ErrCodeHostedZoneAlreadyExists {
// If the zone was already created, we need to find its ID
logger.WithField("callerRef", a.dnsZone.UID).Debug("Hosted zone already exists, looking up by caller reference")
hostedZone, err = a.findZoneByCallerReference(a.dnsZone.Spec.Zone, string(a.dnsZone.UID))
if err != nil {
logger.Error("Failed to find zone by caller reference")
return err
}
} else {
logger.WithError(err).Error("Error creating hosted zone")
return err
}
} else {
logger.Debug("Hosted zone successfully created")
hostedZone = resp.HostedZone
}
logger = logger.WithField("id", aws.StringValue(hostedZone.Id))
logger.Debug("Fetching zone tags")
existingTags, err := a.existingTags(hostedZone.Id)
if err != nil {
logger.WithError(err).Error("Failed to fetch zone tags")
return err
}
a.zoneID = hostedZone.Id
a.currentHostedZoneTags = existingTags
logger.Debug("Syncing zone tags")
err = a.syncTags()
if err != nil {
// When an error occurs tagging the resource, we return an error. This will result in a retry of the create call.
// Because we're using the DNSZone's UID as the CallerReference, the create should succeed without creating a duplicate
// zone. We will then retry adding the tags.
logger.WithError(err).Error("Failed to apply tags to newly created zone")
return err
}
return err
}
func (a *AWSActuator) findZoneByCallerReference(domain, callerRef string) (*route53.HostedZone, error) {
logger := a.logger.WithField("domain", domain).WithField("callerRef", callerRef)
logger.Debug("Searching for zone by domain and callerRef")
var nextZoneID *string
var nextName = aws.String(domain)
for {
logger.Debug("listing hosted zones by name")
resp, err := a.awsClient.ListHostedZonesByName(&route53.ListHostedZonesByNameInput{
DNSName: nextName,
HostedZoneId: nextZoneID,
MaxItems: aws.String("50"),
})
if err != nil {
logger.WithError(err).Error("cannot list zones by name")
return nil, err
}
for _, zone := range resp.HostedZones {
if aws.StringValue(zone.CallerReference) == callerRef {
logger.WithField("id", aws.StringValue(zone.Id)).Debug("found hosted zone matching caller reference")
return zone, nil
}
if aws.StringValue(zone.Name) != domain {
logger.WithField("zone", aws.StringValue(zone.Name)).Debug("reached zone with different domain name, aborting search")
return nil, fmt.Errorf("Hosted zone not found")
}
}
if !aws.BoolValue(resp.IsTruncated) {
logger.Debug("reached end of results, did not find hosted zone")
return nil, fmt.Errorf("Hosted zone not found")
}
nextZoneID = resp.NextHostedZoneId
nextName = resp.NextDNSName
}
}
// Delete removes an AWS Route53 hosted zone, typically because the DNSZone object is in a deleting state.
func (a *AWSActuator) Delete() error {
if a.zoneID == nil {
return errors.New("zoneID is unpopulated")
}
logger := a.logger.WithField("zone", a.dnsZone.Spec.Zone).WithField("id", aws.StringValue(a.zoneID))
if err := a.deleteRecordSets(logger); err != nil {
return err
}
logger.Info("Deleting route53 hostedzone")
_, err := a.awsClient.DeleteHostedZone(&route53.DeleteHostedZoneInput{
Id: a.zoneID,
})
if err != nil {
logLevel := log.ErrorLevel
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == route53.ErrCodeHostedZoneNotEmpty {
logLevel = log.InfoLevel
}
log.WithError(err).Log(logLevel, "Cannot delete hosted zone")
}
return err
}
func (a *AWSActuator) deleteRecordSets(logger log.FieldLogger) error {
logger.Info("Deleting route53 recordsets in hostedzone")
maxItems := "100"
listInput := &route53.ListResourceRecordSetsInput{
HostedZoneId: a.zoneID,
MaxItems: &maxItems,
}
for {
listOutput, err := a.awsClient.ListResourceRecordSets(listInput)
if err != nil {
return err
}
var changes []*route53.Change
for _, recordSet := range listOutput.ResourceRecordSets {
// Ignore the 2 recordsets that are created with the hosted zone and that cannot be deleted
if n, t := aws.StringValue(recordSet.Name), aws.StringValue(recordSet.Type); n == controllerutils.Dotted(a.dnsZone.Spec.Zone) && (t == route53.RRTypeNs || t == route53.RRTypeSoa) {
continue
}
logger.WithField("name", aws.StringValue(recordSet.Name)).WithField("type", aws.StringValue(recordSet.Type)).Info("recordset set for deletion")
changes = append(changes, &route53.Change{
Action: aws.String(route53.ChangeActionDelete),
ResourceRecordSet: recordSet,
})
}
if len(changes) > 0 {
logger.WithField("count", len(changes)).Info("deleting recordsets")
if _, err := a.awsClient.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
ChangeBatch: &route53.ChangeBatch{Changes: changes},
HostedZoneId: a.zoneID,
}); err != nil {
return err
}
}
if listOutput.IsTruncated == nil || !*listOutput.IsTruncated {
break
}
listInput.StartRecordIdentifier = listOutput.NextRecordIdentifier
listInput.StartRecordName = listOutput.NextRecordName
listInput.StartRecordType = listOutput.NextRecordType
}
return nil
}
// GetNameServers returns the nameservers listed in the route53 hosted zone NS record.
func (a *AWSActuator) GetNameServers() ([]string, error) {
if a.zoneID == nil {
return nil, errors.New("zoneID is unpopulated")
}
logger := a.logger.WithField("id", a.zoneID)
logger.Debug("Listing hosted zone NS records")
resp, err := a.awsClient.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{
HostedZoneId: aws.String(*a.zoneID),
StartRecordType: aws.String("NS"),
StartRecordName: aws.String(a.dnsZone.Spec.Zone),
MaxItems: aws.String("1"),
})
if err != nil {
logger.WithError(err).Error("Error listing recordsets for zone")
return nil, err
}
if len(resp.ResourceRecordSets) != 1 {
msg := fmt.Sprintf("unexpected number of recordsets returned: %d", len(resp.ResourceRecordSets))
logger.Error(msg)
return nil, fmt.Errorf(msg)
}
if aws.StringValue(resp.ResourceRecordSets[0].Type) != "NS" {
msg := "name server record not found"
logger.Error(msg)
return nil, fmt.Errorf(msg)
}
if aws.StringValue(resp.ResourceRecordSets[0].Name) != (a.dnsZone.Spec.Zone + ".") {
msg := fmt.Sprintf("name server record not found for domain %s", a.dnsZone.Spec.Zone)
logger.Error(msg)
return nil, fmt.Errorf(msg)
}
result := make([]string, len(resp.ResourceRecordSets[0].ResourceRecords))
for i, record := range resp.ResourceRecordSets[0].ResourceRecords {
result[i] = aws.StringValue(record.Value)
}
logger.WithField("nameservers", result).Debug("found hosted zone name servers")
return result, nil
}
// Exists determines if the route53 hosted zone corresponding to the DNSZone exists
func (a *AWSActuator) Exists() (bool, error) {
return a.zoneID != nil, nil
}
func (a *AWSActuator) setInsufficientCredentialsConditionToFalse() bool {
accessDeniedConds, accessDeniedCondsChanged := controllerutils.SetDNSZoneConditionWithChangeCheck(
a.dnsZone.Status.Conditions,
hivev1.InsufficientCredentialsCondition,
corev1.ConditionFalse,
accessGrantedReason,
"credentials are valid",
controllerutils.UpdateConditionNever,
)
if accessDeniedCondsChanged {
a.dnsZone.Status.Conditions = accessDeniedConds
}
return accessDeniedCondsChanged
}
func (a *AWSActuator) setInsufficientCredentialsConditionToTrue(message string) bool {
accessDeniedConds, accessDeniedCondsChanged := controllerutils.SetDNSZoneConditionWithChangeCheck(
a.dnsZone.Status.Conditions,
hivev1.InsufficientCredentialsCondition,
corev1.ConditionTrue,
accessDeniedReason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if accessDeniedCondsChanged {
// Conditions have changed. Update them in the object.
a.dnsZone.Status.Conditions = accessDeniedConds
}
return accessDeniedCondsChanged
}
func (a *AWSActuator) setAuthenticationFailureConditionToFalse() bool {
authenticationFailureConds, authenticationFailureCondsChanged := controllerutils.SetDNSZoneConditionWithChangeCheck(
a.dnsZone.Status.Conditions,
hivev1.AuthenticationFailureCondition,
corev1.ConditionFalse,
authenticationSucceededReason,
"credentials authenticated",
controllerutils.UpdateConditionNever,
)
if authenticationFailureCondsChanged {
a.dnsZone.Status.Conditions = authenticationFailureConds
}
return authenticationFailureCondsChanged
}
func (a *AWSActuator) setAuthenticationFailureConditionToTrue(message string) bool {
var authenticationFailureConds []hivev1.DNSZoneCondition
authenticationFailureConds, authenticationFailureCondsChanged := controllerutils.SetDNSZoneConditionWithChangeCheck(
a.dnsZone.Status.Conditions,
hivev1.AuthenticationFailureCondition,
corev1.ConditionTrue,
authenticationFailedReason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if authenticationFailureCondsChanged {
// Conditions have changed. Update them in the object.
a.dnsZone.Status.Conditions = authenticationFailureConds
}
return authenticationFailureCondsChanged
}
// SetConditionsForError sets conditions on the dnszone given a specific error. Returns true if conditions changed.
func (a *AWSActuator) SetConditionsForError(err error) bool {
awsErr, ok := err.(awserr.Error)
if !ok {
accessDeniedCondsChanged := a.setInsufficientCredentialsConditionToFalse()
authenticationFailureCondsChanged := a.setAuthenticationFailureConditionToFalse()
return accessDeniedCondsChanged || authenticationFailureCondsChanged
}
accessDeniedCondsChanged := false
authenticationFailureCondsChanged := false
if awsErr.Code() == "AccessDeniedException" || awsErr.Code() == "AccessDenied" {
accessDeniedCondsChanged = a.setInsufficientCredentialsConditionToTrue(awsErr.Message())
} else {
accessDeniedCondsChanged = a.setInsufficientCredentialsConditionToFalse()
}
if awsErr.Code() == "InvalidSignatureException" ||
awsErr.Code() == "UnrecognizedClientException" {
authenticationFailureCondsChanged = a.setAuthenticationFailureConditionToTrue(awsErr.Message())
} else {
authenticationFailureCondsChanged = a.setAuthenticationFailureConditionToFalse()
}
return accessDeniedCondsChanged || authenticationFailureCondsChanged
}
func tagEquals(a, b *route53.Tag) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return aws.StringValue(a.Key) == aws.StringValue(b.Key) &&
aws.StringValue(a.Value) == aws.StringValue(b.Value)
}
func tagString(tag *route53.Tag) string {
return fmt.Sprintf("%s=%s", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
}
func tagsString(tags []*route53.Tag) string {
return strings.Join(func() []string {
result := []string{}
for _, tag := range tags {
result = append(result, tagString(tag))
}
return result
}(), ",")
}
| 1 | 13,979 | How would you feel about a different approach where the `DeleteAWSRecordSets` gets passed the zone ID and zone name rather than the `DNSZone`? | openshift-hive | go |
@@ -66,15 +66,17 @@ class PhotoMetricDistortion(object):
class Expand(object):
- def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
+ def __init__(self, mean=(0, 0, 0), to_rgb=True,
+ ratio_range=(1, 4), prob=0.5):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
+ self.prob = prob
def __call__(self, img, boxes, labels):
- if random.randint(2):
+ if random.uniform(0, 1) > self.prob:
return img, boxes, labels
h, w, c = img.shape | 1 | import mmcv
import numpy as np
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
class PhotoMetricDistortion(object):
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, img, boxes, labels):
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
        # mode == 0 --> do random contrast last (after saturation/hue)
        # mode == 1 --> do random contrast first (before saturation/hue)
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
return img, boxes, labels
class Expand(object):
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, img, boxes, labels):
if random.randint(2):
return img, boxes, labels
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
img = expand_img
boxes += np.tile((left, top), 2)
return img, boxes, labels
class RandomCrop(object):
def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
# 1: return ori img
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
def __call__(self, img, boxes, labels):
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
if mode == 1:
return img, boxes, labels
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array((int(left), int(top), int(left + new_w),
int(top + new_h)))
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if overlaps.min() < min_iou:
continue
                # center of boxes should be inside the crop img
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = (center[:, 0] > patch[0]) * (
center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
if not mask.any():
continue
boxes = boxes[mask]
labels = labels[mask]
# adjust boxes
img = img[patch[1]:patch[3], patch[0]:patch[2]]
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
return img, boxes, labels
class ExtraAugmentation(object):
def __init__(self,
photo_metric_distortion=None,
expand=None,
random_crop=None):
self.transforms = []
if photo_metric_distortion is not None:
self.transforms.append(
PhotoMetricDistortion(**photo_metric_distortion))
if expand is not None:
self.transforms.append(Expand(**expand))
if random_crop is not None:
self.transforms.append(RandomCrop(**random_crop))
def __call__(self, img, boxes, labels):
img = img.astype(np.float32)
for transform in self.transforms:
img, boxes, labels = transform(img, boxes, labels)
return img, boxes, labels
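# Usage sketch (illustrative, not part of the original module). The three sub-config
# dicts map directly onto the classes above; the concrete numbers are examples only.
#
#   extra_aug = ExtraAugmentation(
#       photo_metric_distortion=dict(
#           brightness_delta=32,
#           contrast_range=(0.5, 1.5),
#           saturation_range=(0.5, 1.5),
#           hue_delta=18),
#       expand=dict(mean=(123.675, 116.28, 103.53), to_rgb=True, ratio_range=(1, 4)),
#       random_crop=dict(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3))
#   img, boxes, labels = extra_aug(img, boxes, labels)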
| 1 | 17,595 | it seems `random.uniform(0, 1)` similar to `random.randint(2)`, all have 1/2 probabilities. | open-mmlab-mmdetection | py |
@@ -121,7 +121,11 @@ class AutoScaleConnection(AWSQueryConnection):
for i in xrange(1, len(items)+1):
if isinstance(items[i-1], dict):
for k, v in items[i-1].iteritems():
- params['%s.member.%d.%s' % (label, i, k)] = v
+ if isinstance(v, dict):
+ for kk, vv in v.iteritems():
+ params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv
+ else:
+ params['%s.member.%d.%s' % (label, i, k)] = v
elif isinstance(items[i-1], basestring):
params['%s.member.%d' % (label, i)] = items[i-1]
| 1 | # Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
Auto Scaling service.
"""
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.ec2.regioninfo import RegionInfo
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup, ProcessType
from boto.ec2.autoscale.activity import Activity
from boto.ec2.autoscale.policy import AdjustmentType, MetricCollectionTypes, ScalingPolicy
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
RegionData = {
'us-east-1' : 'autoscaling.us-east-1.amazonaws.com',
'us-west-1' : 'autoscaling.us-west-1.amazonaws.com',
'eu-west-1' : 'autoscaling.eu-west-1.amazonaws.com',
'ap-northeast-1' : 'autoscaling.ap-northeast-1.amazonaws.com',
'ap-southeast-1' : 'autoscaling.ap-southeast-1.amazonaws.com'}
def regions():
"""
Get all available regions for the Auto Scaling service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=AutoScaleConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.autoscale.AutoScaleConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.AutoScaleConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class AutoScaleConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'autoscale_version', '2010-08-01')
DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint',
'autoscaling.amazonaws.com')
DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name', 'us-east-1')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=1,
https_connection_factory=None, region=None, path='/'):
"""
Init method to create a new connection to the AutoScaling service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
AutoScaleConnection)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path=path)
def _required_auth_capability(self):
return ['ec2']
def build_list_params(self, params, items, label):
""" items is a list of dictionaries or strings:
[{'Protocol' : 'HTTP',
'LoadBalancerPort' : '80',
'InstancePort' : '80'},..] etc.
or
['us-east-1b',...]
"""
# different from EC2 list params
for i in xrange(1, len(items)+1):
if isinstance(items[i-1], dict):
for k, v in items[i-1].iteritems():
params['%s.member.%d.%s' % (label, i, k)] = v
elif isinstance(items[i-1], basestring):
params['%s.member.%d' % (label, i)] = items[i-1]
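    # Illustrative example (not part of the original module): with
    # label='LoadBalancerNames' and items=['my-elb'] the loop above produces
    #   LoadBalancerNames.member.1 = my-elb
    # and with label='Listeners' and
    # items=[{'Protocol': 'HTTP', 'LoadBalancerPort': '80', 'InstancePort': '80'}]
    # it produces keys such as
    #   Listeners.member.1.Protocol = HTTP
    #   Listeners.member.1.LoadBalancerPort = 80
    # (The diff at the top of this entry extends the dict branch so a nested dict
    # like {'Ebs': {'VolumeSize': '100'}} flattens into an extra '.Ebs.VolumeSize'
    # suffix on the member key.)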
def _update_group(self, op, as_group):
params = {
'AutoScalingGroupName' : as_group.name,
'LaunchConfigurationName' : as_group.launch_config_name,
'MinSize' : as_group.min_size,
'MaxSize' : as_group.max_size,
}
# get availability zone information (required param)
zones = as_group.availability_zones
self.build_list_params(params, zones,
'AvailabilityZones')
if as_group.desired_capacity:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier
if as_group.health_check_period:
params['HealthCheckGracePeriod'] = as_group.health_check_period
if as_group.health_check_type:
params['HealthCheckType'] = as_group.health_check_type
if as_group.default_cooldown:
params['DefaultCooldown'] = as_group.default_cooldown
if as_group.placement_group:
params['PlacementGroup'] = as_group.placement_group
if op.startswith('Create'):
# you can only associate load balancers with an autoscale group at creation time
if as_group.load_balancers:
self.build_list_params(params, as_group.load_balancers,
'LoadBalancerNames')
return self.get_object(op, params, Request)
def create_auto_scaling_group(self, as_group):
"""
Create auto scaling group.
"""
return self._update_group('CreateAutoScalingGroup', as_group)
def delete_auto_scaling_group(self, name):
"""
Deletes the specified auto scaling group if the group has no instances
and no scaling activities in progress.
"""
params = {'AutoScalingGroupName' : name}
return self.get_object('DeleteAutoScalingGroup', params, Request)
def create_launch_configuration(self, launch_config):
"""
Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
"""
params = {
'ImageId' : launch_config.image_id,
'LaunchConfigurationName' : launch_config.name,
'InstanceType' : launch_config.instance_type,
}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
params['UserData'] = base64.b64encode(launch_config.user_data)
if launch_config.kernel_id:
params['KernelId'] = launch_config.kernel_id
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
self.build_list_params(params, launch_config.block_device_mappings,
'BlockDeviceMappings')
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
params['InstanceMonitoring'] = 'true'
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST')
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
params = {'AdjustmentType' : scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName' : scaling_policy.name,
'ScalingAdjustment' : scaling_policy.scaling_adjustment,}
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request)
def delete_launch_configuration(self, launch_config_name):
"""
Deletes the specified LaunchConfiguration.
The specified launch configuration must not be attached to an Auto
Scaling group. Once this call completes, the launch configuration is no
longer available for use.
"""
params = {'LaunchConfigurationName' : launch_config_name}
return self.get_object('DeleteLaunchConfiguration', params, Request)
def get_all_groups(self, names=None, max_records=None, next_token=None):
"""
Returns a full description of each Auto Scaling group in the given
list. This includes all Amazon EC2 instances that are members of the
group. If a list of names is not provided, the service returns the full
details of all Auto Scaling groups.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type names: list
:param names: List of group names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of groups to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if names:
self.build_list_params(params, names, 'AutoScalingGroupNames')
return self.get_list('DescribeAutoScalingGroups', params,
[('member', AutoScalingGroup)])
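    # Pagination sketch (illustrative, not part of the original module): as the
    # docstring above explains, a caller passes the token from one response back
    # in as next_token on the following call, e.g. (assuming `conn` is an
    # AutoScaleConnection and `token` was taken from the previous response):
    #
    #   first_page = conn.get_all_groups(max_records=50)
    #   next_page = conn.get_all_groups(max_records=50, next_token=token)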
def get_all_launch_configurations(self, **kwargs):
"""
Returns a full description of the launch configurations given the
specified names.
If no names are specified, then the full details of all launch
configurations are returned.
:type names: list
:param names: List of configuration names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of configurations to return.
:type next_token: str
:param next_token: If you have more results than can be returned at once, pass in this
parameter to page through all results.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` instances.
"""
params = {}
max_records = kwargs.get('max_records', None)
names = kwargs.get('names', None)
if max_records is not None:
params['MaxRecords'] = max_records
if names:
self.build_list_params(params, names, 'LaunchConfigurationNames')
next_token = kwargs.get('next_token')
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeLaunchConfigurations', params,
[('member', LaunchConfiguration)])
def get_all_activities(self, autoscale_group, activity_ids=None, max_records=None, next_token=None):
"""
Get all activities for the given autoscaling group.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter
:type autoscale_group: str or :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The auto scaling group to get activities on.
:type max_records: int
:param max_records: Maximum amount of activities to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.activity.Activity` instances.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName' : name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if activity_ids:
self.build_list_params(params, activity_ids, 'ActivityIds')
return self.get_list('DescribeScalingActivities', params, [('member', Activity)])
def delete_scheduled_action(self, scheduled_action_name, autoscale_group=None):
params = {
'ScheduledActionName' : scheduled_action_name,
}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeleteScheduledAction', params)
def terminate_instance(self, instance_id, decrement_capacity=True):
params = {
'InstanceId' : instance_id,
'ShouldDecrementDesiredCapacity' : decrement_capacity
}
return self.get_object('TerminateInstanceInAutoScalingGroup', params,
Activity)
def delete_policy(self, policy_name, autoscale_group=None):
params = {
'PolicyName': policy_name,
}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeletePolicy', params)
def get_all_adjustment_types(self):
return self.get_list('DescribeAdjustmentTypes', {}, [('member', AdjustmentType)])
def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
"""
Returns a description of each Auto Scaling instance in the instance_ids
list. If a list is not provided, the service returns the full details
of all instances up to a maximum of fifty.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type instance_ids: list
:param instance_ids: List of Autoscaling Instance IDs which should be searched for.
:type max_records: int
:param max_records: Maximum number of results to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.activity.Activity` instances.
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceIds')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAutoScalingInstances', params, [('member', Instance)])
def get_all_metric_collection_types(self):
"""
Returns a list of metrics and a corresponding list of granularities
for each metric.
"""
return self.get_object('DescribeMetricCollectionTypes', {}, MetricCollectionTypes)
def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
"""
Returns descriptions of what each policy does. This action supports
pagination. If the response includes a token, there are more records
available. To get the additional records, repeat the request with the
response token as the NextToken parameter.
        If no group name or list of policy names is provided, all available policies
        are returned.
        :type as_group: str
        :param as_group: the name of the :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
        :type policy_names: list
        :param policy_names: List of policy names which should be searched for.
        :type max_records: int
        :param max_records: Maximum amount of policies to return.
"""
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if policy_names:
self.build_list_params(params, policy_names, 'PolicyNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribePolicies', params, [('member', ScalingPolicy)])
def get_all_scaling_process_types(self):
""" Returns scaling process types for use in the ResumeProcesses and
SuspendProcesses actions.
"""
return self.get_list('DescribeScalingProcessTypes', {}, [('member', ProcessType)])
def suspend_processes(self, as_group, scaling_processes=None):
""" Suspends Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to suspend. If omitted, all
processes will be suspended.
"""
params = {
'AutoScalingGroupName' : as_group
}
if scaling_processes:
self.build_list_params(params, scaling_processes, 'ScalingProcesses')
return self.get_status('SuspendProcesses', params)
def resume_processes(self, as_group, scaling_processes=None):
""" Resumes Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to resume processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to resume. If omitted, all
processes will be resumed.
"""
params = {
'AutoScalingGroupName' : as_group
}
if scaling_processes:
self.build_list_params(params, scaling_processes, 'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
def create_scheduled_group_action(self, as_group, name, time, desired_capacity=None,
min_size=None, max_size=None):
""" Creates a scheduled scaling action for a Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
unchanged in the affected Auto Scaling group.
:type as_group: string
        :param as_group: The auto scaling group to create the scheduled action for.
:type name: string
:param name: Scheduled action name.
:type time: datetime.datetime
:param time: The time for this action to start.
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should be running in
this group.
:type min_size: int
:param min_size: The minimum size for the new auto scaling group.
:type max_size: int
        :param max_size: The maximum size for the new auto scaling group.
"""
params = {
'AutoScalingGroupName' : as_group,
'ScheduledActionName' : name,
'Time' : time.isoformat(),
}
if desired_capacity:
params['DesiredCapacity'] = desired_capacity
if min_size:
params['MinSize'] = min_size
if max_size:
params['MaxSize'] = max_size
return self.get_status('PutScheduledUpdateGroupAction', params)
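    # Usage sketch (illustrative, not part of the original module), assuming `conn`
    # is an AutoScaleConnection and 'my-group' is an existing group:
    #
    #   import datetime
    #   conn.create_scheduled_group_action('my-group', 'scale-up-tonight',
    #                                      datetime.datetime(2012, 1, 1, 2, 0),
    #                                      desired_capacity=4)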
def get_all_scheduled_actions(self, as_group=None, start_time=None, end_time=None, scheduled_actions=None,
max_records=None, next_token=None):
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if scheduled_actions:
self.build_list_params(params, scheduled_actions, 'ScheduledActionNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeScheduledActions', params, [('member', ScheduledUpdateGroupAction)])
def disable_metrics_collection(self, as_group, metrics=None):
"""
Disables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
"""
params = {
'AutoScalingGroupName' : as_group,
}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('DisableMetricsCollection', params)
def enable_metrics_collection(self, as_group, granularity, metrics=None):
"""
Enables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of enabled
metrics with the Metrics parameter.
Auto scaling metrics collection can be turned on only if the
InstanceMonitoring.Enabled flag, in the Auto Scaling group's launch
configuration, is set to true.
:type autoscale_group: string
:param autoscale_group: The auto scaling group to get activities on.
:type granularity: string
:param granularity: The granularity to associate with the metrics to
collect. Currently, the only legal granularity is "1Minute".
:type metrics: string list
:param metrics: The list of metrics to collect. If no metrics are
specified, all metrics are enabled.
"""
params = {
'AutoScalingGroupName' : as_group,
'Granularity' : granularity,
}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('EnableMetricsCollection', params)
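    # Usage sketch (illustrative, not part of the original module), assuming `conn`
    # is an AutoScaleConnection and 'my-group' has instance monitoring enabled in
    # its launch configuration:
    #
    #   conn.enable_metrics_collection('my-group', '1Minute',
    #                                  metrics=['GroupMinSize', 'GroupMaxSize'])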
def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
params = {
'PolicyName' : policy_name,
}
if as_group:
params['AutoScalingGroupName'] = as_group
if honor_cooldown:
params['HonorCooldown'] = honor_cooldown
return self.get_status('ExecutePolicy', params)
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
Explicitly set the health status of an instance.
:type instance_id: str
:param instance_id: The identifier of the EC2 instance.
:type health_status: str
:param health_status: The health status of the instance.
"Healthy" means that the instance is
healthy and should remain in service.
"Unhealthy" means that the instance is
unhealthy. Auto Scaling should terminate
and replace it.
:type should_respect_grace_period: bool
:param should_respect_grace_period: If True, this call should
respect the grace period
associated with the group.
"""
params = {'InstanceId' : instance_id,
'HealthStatus' : health_status}
if should_respect_grace_period:
params['ShouldRespectGracePeriod'] = 'true'
else:
params['ShouldRespectGracePeriod'] = 'false'
return self.get_status('SetInstanceHealth', params)
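    # Usage sketch (illustrative, not part of the original module):
    #
    #   conn.set_instance_health('i-0123abcd', 'Unhealthy',
    #                            should_respect_grace_period=False)
    #
    # marks the instance unhealthy so Auto Scaling will terminate and replace it,
    # per the docstring above.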
| 1 | 7,845 | Added to support EBS volume creation, used like this: blockDeviceMap = [] blockDeviceMap.append( {'DeviceName':'/dev/sdc', 'VirtualName' : 'ephemeral0'}) blockDeviceMap.append( {'DeviceName':'/dev/sdh', 'Ebs': {'VolumeSize' : '100'} }) | boto-boto | py |
@@ -17,6 +17,8 @@ type mockedSigner struct {
signatureToReturn identity.Signature
}
+var testMysteriumApiUrl = "http://testUrl"
+
func (signer *mockedSigner) Sign(message []byte) (identity.Signature, error) {
return signer.signatureToReturn, nil
} | 1 | package server
import (
"bytes"
"github.com/mysterium/node/identity"
"github.com/stretchr/testify/assert"
"io"
"net/url"
"testing"
)
type testPayload struct {
Value string `json:"value"`
}
type mockedSigner struct {
signatureToReturn identity.Signature
}
func (signer *mockedSigner) Sign(message []byte) (identity.Signature, error) {
return signer.signatureToReturn, nil
}
func TestSignatureIsInsertedForSignedPost(t *testing.T) {
signer := mockedSigner{identity.SignatureBase64("deadbeef")}
req, err := newSignedPostRequest("/post-path", testPayload{"abc"}, &signer)
assert.NoError(t, err)
assert.Equal(t, req.Header.Get("Authorization"), "Signature deadbeef")
}
func TestDoGetContactsPassedValuesForUrl(t *testing.T) {
mysteriumApiUrl = "http://testUrl"
params := url.Values{}
params["param1"] = []string{"value1"}
params["param2"] = []string{"value2"}
req, err := newGetRequest("get-path", params)
assert.NoError(t, err)
assert.Equal(t, "http://testUrl/get-path?param1=value1¶m2=value2", req.URL.String())
}
func TestPayloadIsSerializedSuccessfullyForPostMethod(t *testing.T) {
req, err := newPostRequest("post-path", testPayload{"abc"})
assert.NoError(t, err)
bodyBytes := bytes.NewBuffer(nil)
_, err = io.Copy(bodyBytes, req.Body)
assert.NoError(t, err)
assert.JSONEq(
t,
`{
"value" : "abc"
}`,
bodyBytes.String(),
)
}
| 1 | 10,590 | We shouldn't depend that url is `mysterium` - it can be just `testRequestURL`. Also, `URL` has to be upper-case. | mysteriumnetwork-node | go |
@@ -689,8 +689,9 @@ VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties2(VkPhysica
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- bool skip = PreCallValidateGetPhysicalDeviceImageFormatProperties2(instance_data->report_data, pImageFormatInfo,
- pImageFormatProperties);
+ unique_lock_t lock(global_lock);
+ bool skip = PreCallValidateGetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo, pImageFormatProperties);
+ lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
} else { | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
*/
#include "core_validation.h"
#include "shader_validation.h"
#include "vk_layer_data.h"
#include "vk_dispatch_table_helper.h"
#include "vk_layer_extension_utils.h"
#include "buffer_validation.h"
#include <unordered_map>
namespace core_validation {
using std::unordered_map;
using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
extern unordered_map<void *, layer_data *> layer_data_map;
extern unordered_map<void *, instance_layer_data *> instance_layer_data_map;
extern mutex_t global_lock;
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
static const VkLayerProperties global_layer = {
"VK_LAYER_LUNARG_core_validation",
VK_LAYER_API_VERSION,
1,
"LunarG Validation Layer",
};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
};
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
bool foundLayer = false;
for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
foundLayer = true;
}
// This has to be logged to console as we don't have a callback at this point.
if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
lock_guard_t lock(global_lock);
PostCallRecordCreateFence(device, pCreateInfo, pAllocator, pFence, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
if (result != VK_SUCCESS) return result;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
instance_data->instance = *pInstance;
layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
instance_data->report_data = debug_utils_create_instance(
&instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
(pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
"lunarg_core_validation");
ValidateLayerOrdering(*pCreateInfo);
PostCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance, result);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(instance);
instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
lock_guard_t lock(global_lock);
while (instance_data->logging_messenger.size() > 0) {
VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
instance_data->logging_messenger.pop_back();
}
while (instance_data->logging_callback.size() > 0) {
VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
instance_data->logging_callback.pop_back();
}
layer_debug_utils_destroy_instance(instance_data->report_data);
FreeLayerDataPtr(key, instance_layer_data_map);
}
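// CreateDevice mirrors CreateInstance: validate first, advance the loader's device chain link, and resolve the downstream
// vkCreateDevice through the instance-level GetInstanceProcAddr. When GPU-assisted validation is enabled,
// GpuPreCallRecordCreateDevice may substitute a modified create info that enables the device features it needs. The API
// version used for validation is the minimum of the device's reported apiVersion and the instance's requested version.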
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
// Advance the link info for the next element on the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
// GPU Validation can possibly turn on device features, so give it a chance to change the create info.
std::unique_ptr<safe_VkDeviceCreateInfo> gpu_create_info;
if (instance_data->enabled.gpu_validation) {
VkPhysicalDeviceFeatures supported_features;
instance_data->dispatch_table.GetPhysicalDeviceFeatures(gpu, &supported_features);
gpu_create_info = GpuPreCallRecordCreateDevice(gpu, pCreateInfo, &supported_features);
pCreateInfo = reinterpret_cast<VkDeviceCreateInfo *>(gpu_create_info.get());
}
lock.unlock();
VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
if (result != VK_SUCCESS) {
return result;
}
lock.lock();
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
device_data->instance_data = instance_data;
// Setup device dispatch table
layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
device_data->device = *pDevice;
// Save PhysicalDevice handle
device_data->physical_device = gpu;
device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
// Get physical device limits for this device
instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
// Setup the validation tables based on the application API version from the instance and the capabilities of the device driver.
uint32_t effective_api_version = std::min(device_data->phys_dev_properties.properties.apiVersion, instance_data->api_version);
device_data->api_version =
device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, effective_api_version, pCreateInfo);
PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
ValidateLayerOrdering(*pCreateInfo);
lock.unlock();
return result;
}
// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Pre-record to avoid Destroy/Create race (if/when implemented)
dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
void *pData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
return VK_ERROR_LAYER_NOT_PRESENT;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
uint32_t *pCount, VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
assert(physicalDevice);
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}
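// Command-buffer entry points such as CmdUpdateBuffer below validate and record under global_lock but release the lock
// around the actual dispatch, so no driver call is ever made while the layer's global mutex is held.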
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_state = GetCBNode(dev_data, commandBuffer);
assert(cb_state);
auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
assert(dst_buff_state);
skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
lock.lock();
PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
unique_lock_t lock(global_lock);
PostCallRecordGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
unique_lock_t lock(global_lock);
if (*pQueue != VK_NULL_HANDLE) {
PostCallRecordGetDeviceQueue2(device, pQueueInfo, pQueue);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion);
unique_lock_t lock(global_lock);
PostCallRecordCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateSamplerYcbcrConversionKHR(device, pCreateInfo, pAllocator, pYcbcrConversion);
unique_lock_t lock(global_lock);
PostCallRecordCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
return result;
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdDebugMarkerBeginEXT(device_data, cb_state);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
unique_lock_t lock(global_lock);
PreCallRecordDestroyDevice(device, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyDevice(device, pAllocator);
lock.lock();
FreeLayerDataPtr(key, layer_data_map);
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
lock.lock();
PostCallRecordAllocateMemory(device, pAllocateInfo, pAllocator, pMemory, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeMemory(device, mem, pAllocator);
lock.unlock();
if (!skip) {
if (mem != VK_NULL_HANDLE) {
// Avoid free/alloc race by recording state change before dispatching
lock.lock();
PreCallRecordFreeMemory(device, mem, pAllocator);
lock.unlock();
}
dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Verify fence status of submitted fences
unique_lock_t lock(global_lock);
bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetFenceStatus(device, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
lock.lock();
PostCallRecordGetFenceStatus(device, fence, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueWaitIdle(queue);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
lock.lock();
PostCallRecordQueueWaitIdle(queue, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDeviceWaitIdle(dev_data);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordDeviceWaitIdle(dev_data);
lock.unlock();
}
return result;
}
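// The Destroy* entry points that follow share one pattern: validate under the lock, then pre-record the state removal
// *before* dispatching the destroy, so a concurrent Create that reuses the same handle value cannot observe stale layer
// state (the "Destroy/Create race" referenced in the comments below).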
VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyFence(device, fence, pAllocator);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyFence(device, fence, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySemaphore(device, semaphore, pAllocator);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroySemaphore(device, semaphore, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyEvent(device, event, pAllocator);
if (!skip) {
PreCallRecordDestroyEvent(device, event, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyQueryPool(device, queryPool, pAllocator);
if (!skip) {
PreCallRecordDestroyQueryPool(device, queryPool, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result =
dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
lock.lock();
PostCallRecordGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, result);
lock.unlock();
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyBuffer(device, buffer, pAllocator);
if (!skip) {
PreCallRecordDestroyBuffer(device, buffer, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
    // Validate state before calling down the chain; only update layer state if we will actually call down
bool skip = PreCallValidateDestroyBufferView(device, bufferView, pAllocator);
if (!skip) {
if (bufferView != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyBufferView(device, bufferView, pAllocator);
}
lock.unlock();
dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyImage(device, image, pAllocator);
if (!skip) {
if (image != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyImage(device, image, pAllocator);
}
lock.unlock();
dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateBindBufferMemory(device, buffer, mem, memoryOffset);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
lock.lock();
PostCallRecordBindBufferMemory(device, buffer, mem, memoryOffset, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
bool skip = false;
unique_lock_t lock(global_lock);
skip = PreCallValidateBindBufferMemory2(device, bindInfoCount, pBindInfos);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
lock.lock();
PostCallRecordBindBufferMemory2(device, bindInfoCount, pBindInfos, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
bool skip = false;
unique_lock_t lock(global_lock);
skip = PreCallValidateBindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
lock.lock();
PostCallRecordBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, result);
lock.unlock();
}
return result;
}
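// The GetBufferMemoryRequirements* wrappers below only record (the returned requirements are cached on the buffer state,
// presumably so that later vkBindBufferMemory* calls can be validated against them); the GetImageMemoryRequirements2*
// variants additionally run a PreCallValidate step on the input structure.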
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
VkMemoryRequirements *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    unique_lock_t lock(global_lock);
    PostCallRecordGetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
    unique_lock_t lock(global_lock);
    PostCallRecordGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
    unique_lock_t lock(global_lock);
    PostCallRecordGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
unique_lock_t lock(global_lock);
PostCallRecordGetImageMemoryRequirements(device, image, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
if (skip) return;
lock.unlock();
dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
lock.lock();
PostCallRecordGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
if (skip) return;
lock.unlock();
dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
lock.lock();
PostCallRecordGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
PostCallRecordGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
PostCallRecordGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
PostCallRecordGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
VkImageType type, VkSampleCountFlagBits samples,
VkImageUsageFlags usage, VkImageTiling tiling,
uint32_t *pPropertyCount,
VkSparseImageFormatProperties *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
pPropertyCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = PreCallValidateGetPhysicalDeviceImageFormatProperties2(instance_data->report_data, pImageFormatInfo,
pImageFormatProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo,
pImageFormatProperties);
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = PreCallValidateGetPhysicalDeviceImageFormatProperties2(instance_data->report_data, pImageFormatInfo,
pImageFormatProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties2KHR(physicalDevice, pImageFormatInfo,
pImageFormatProperties);
}
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
VkSparseImageFormatProperties2KHR *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
VkSparseImageFormatProperties2KHR *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
}
VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyImageView(device, imageView, pAllocator);
if (!skip) {
PreCallRecordDestroyImageView(device, imageView, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
PreCallRecordDestroyShaderModule(device, shaderModule, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyPipeline(device, pipeline, pAllocator);
if (!skip) {
PreCallRecordDestroyPipeline(device, pipeline, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyPipelineLayout(device, pipelineLayout, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySampler(device, sampler, pAllocator);
if (!skip) {
PreCallRecordDestroySampler(device, sampler, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
{
lock_guard_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}
dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyDescriptorPool(device, descriptorPool, pAllocator);
if (!skip) {
PreCallRecordDestroyDescriptorPool(device, descriptorPool, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
if (skip) return;
PreCallRecordFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
lock.unlock();
dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
lock_guard_t lock(global_lock);
PostCallRecordCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
lock.unlock();
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
}
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordCreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool, result);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyCommandPool(device, commandPool, pAllocator);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyCommandPool(device, commandPool, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
auto pPool = GetCommandPoolNode(dev_data, commandPool);
bool skip = PreCallValidateResetCommandPool(dev_data, pPool);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
// Reset all of the CBs allocated from this pool
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordResetCommandPool(dev_data, pPool);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateResetFences(dev_data, fenceCount, pFences);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordResetFences(dev_data, fenceCount, pFences);
lock.unlock();
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyFramebuffer(device, framebuffer, pAllocator);
if (!skip) {
PreCallRecordDestroyFramebuffer(device, framebuffer, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyRenderPass(device, renderPass, pAllocator);
if (!skip) {
PreCallRecordDestroyRenderPass(device, renderPass, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
lock.lock();
PostCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateBufferView(device, pCreateInfo, pAllocator, pView);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
lock.lock();
PostCallRecordCreateBufferView(device, pCreateInfo, pAllocator, pView, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateImage(device, pCreateInfo, pAllocator, pImage);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
}
lock.lock();
PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateImageView(device, pCreateInfo, pAllocator, pView);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
lock.lock();
PostCallRecordCreateImageView(device, pCreateInfo, pAllocator, pView, result);
lock.unlock();
return result;
}
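// VK_EXT_validation_cache is implemented entirely inside the layer: a VkValidationCacheEXT handle is a pointer to a
// layer-owned ValidationCache object, so the Create/Destroy/GetData/Merge entry points below never call down the chain.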
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete (ValidationCache *)validationCache;
}
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t inSize = *pDataSize;
((ValidationCache *)validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto dst = (ValidationCache *)dstCache;
auto src = (ValidationCache const *const *)pSrcCaches;
VkResult result = VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) {
if (src[i] == dst) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
0, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src[i]);
}
}
return result;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetAndroidHardwareBufferProperties(device, buffer, pProperties);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
lock.unlock();
VkResult result = dev_data->dispatch_table.GetAndroidHardwareBufferPropertiesANDROID(device, buffer, pProperties);
lock.lock();
PostCallRecordGetAndroidHardwareBufferProperties(device, buffer, pProperties, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetMemoryAndroidHardwareBuffer(device, pInfo, pBuffer);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
return dev_data->dispatch_table.GetMemoryAndroidHardwareBufferANDROID(device, pInfo, pBuffer);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
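// Compute and ray-tracing pipeline creation build PIPELINE_STATE objects during pre-validation and hand them to the
// post-record step; if validation fails, every returned handle is set to VK_NULL_HANDLE and
// VK_ERROR_VALIDATION_FAILED_EXT is returned without calling the driver.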
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
std::vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateComputePipelines(dev_data, &pipe_state, count, pCreateInfos);
if (skip) {
for (uint32_t i = 0; i < count; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
lock.unlock();
auto result =
dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
lock.lock();
PostCallRecordCreateComputePipelines(dev_data, &pipe_state, count, pPipelines);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
bool skip = false;
std::vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
pipe_state.reserve(count);
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
skip |= PreCallValidateCreateRayTracingPipelinesNV(dev_data, count, pCreateInfos, pipe_state);
lock.unlock();
if (skip) {
for (uint32_t i = 0; i < count; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
auto result =
dev_data->dispatch_table.CreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
lock.lock();
PostCallRecordCreateRayTracingPipelinesNV(dev_data, count, pipe_state, pPipelines);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
lock_guard_t lock(global_lock);
PostCallRecordCreateSampler(device, pCreateInfo, pAllocator, pSampler, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
if (!skip) {
lock.unlock();
result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
lock.lock();
PostCallRecordCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout, result);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
lock_guard_t lock(global_lock);
PostCallRecordCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Make sure sets being destroyed are not currently in-use
bool skip = PreCallValidateResetDescriptorPool(dev_data, descriptorPool);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordResetDescriptorPool(dev_data, device, descriptorPool, flags);
lock.unlock();
}
return result;
}
// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Make sure that no sets being destroyed are in-flight
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
VkResult result;
if (skip) {
result = VK_ERROR_VALIDATION_FAILED_EXT;
} else {
        // A race here is invalid (descriptorPool should be externally synchronized), but record defensively against one anyway
PreCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
lock.unlock();
result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
    // The only map look-up needed at this top level is the device-level layer_data
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
if (!skip) {
        // Since UpdateDescriptorSets() returns void there is no result to check, so state can be recorded before calling down the chain
PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
lock.unlock();
dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
VkCommandBuffer *pCommandBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
unique_lock_t lock(global_lock);
PostCallRecordAllocateCommandBuffers(device, pCreateInfo, pCommandBuffer, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
skip |= PreCallValidateBeginCommandBuffer(commandBuffer, pBeginInfo);
PreCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
skip |= PreCallValidateEndCommandBuffer(commandBuffer);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
lock.lock();
PostCallRecordEndCommandBuffer(commandBuffer, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateResetCommandBuffer(commandBuffer, flags);
lock.unlock();
if (!skip) result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
lock.lock();
PostCallRecordResetCommandBuffer(commandBuffer, flags, result);
lock.unlock();
return result;
}
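// The CmdSet* dynamic-state wrappers below all follow the same recipe: look up the GLOBAL_CB_NODE for the command buffer,
// validate that the command is legal in the buffer's current recording state, record the dynamic-state update on the CB,
// then dispatch with the lock released.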
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetViewport(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetViewport(pCB, firstViewport, viewportCount);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetScissor(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetScissor(pCB, firstScissor, scissorCount);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
VKAPI_ATTR void VKAPI_CALL CmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetExclusiveScissorNV(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetExclusiveScissorNV(pCB, firstExclusiveScissor, exclusiveScissorCount);
}
}
lock.unlock();
if (!skip)
dev_data->dispatch_table.CmdSetExclusiveScissorNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount,
pExclusiveScissors);
}
VKAPI_ATTR void VKAPI_CALL CmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdBindShadingRateImageNV(dev_data, pCB, commandBuffer, imageView, imageLayout);
if (!skip) {
PreCallRecordCmdBindShadingRateImageNV(dev_data, pCB, imageView);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdBindShadingRateImageNV(commandBuffer, imageView, imageLayout);
}
VKAPI_ATTR void VKAPI_CALL CmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetViewportShadingRatePaletteNV(dev_data, pCB, commandBuffer, firstViewport, viewportCount,
pShadingRatePalettes);
if (!skip) {
PreCallRecordCmdSetViewportShadingRatePaletteNV(pCB, firstViewport, viewportCount);
}
}
lock.unlock();
if (!skip)
dev_data->dispatch_table.CmdSetViewportShadingRatePaletteNV(commandBuffer, firstViewport, viewportCount,
pShadingRatePalettes);
}
VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetLineWidth(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetLineWidth(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetDepthBias(dev_data, pCB, commandBuffer, depthBiasClamp);
if (!skip) {
PreCallRecordCmdSetDepthBias(pCB);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}
}
VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetBlendConstants(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetBlendConstants(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetDepthBounds(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetDepthBounds(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}
VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilCompareMask(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilCompareMask(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}
VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilWriteMask(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilWriteMask(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}
VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilReference(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilReference(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}
VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
if (!skip) {
PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
dynamicOffsetCount, pDynamicOffsets);
lock.unlock();
device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
} else {
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
auto cb_state = GetCBNode(device_data, commandBuffer);
if (cb_state) {
skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
if (!skip) {
PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites);
}
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites);
}
}
VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto buffer_state = GetBufferState(dev_data, buffer);
auto cb_node = GetCBNode(dev_data, commandBuffer);
assert(cb_node);
assert(buffer_state);
    skip |= PreCallValidateCmdBindIndexBuffer(dev_data, buffer_state, cb_node, commandBuffer, buffer, offset, indexType);
if (skip) return;
PreCallRecordCmdBindIndexBuffer(buffer_state, cb_node, buffer, offset, indexType);
lock.unlock();
dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
assert(cb_node);
skip |= PreCallValidateCmdBindVertexBuffers(dev_data, cb_node, bindingCount, pBuffers, pOffsets);
if (skip) return;
PreCallRecordCmdBindVertexBuffers(cb_node, firstBinding, bindingCount, pBuffers, pOffsets);
lock.unlock();
dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
uint32_t firstVertex, uint32_t firstInstance) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
lock.unlock();
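    // The lock is released across the down-chain call and re-acquired afterwards to record draw-time state.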
if (!skip) {
dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
lock.lock();
PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
"vkCmdDrawIndexed()", indexCount, firstIndex);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
lock.lock();
PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t count, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
&cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
lock.lock();
PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip =
PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
lock.lock();
PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
&cb_state, &buffer_state, "vkCmdDispatchIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
lock.lock();
PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
if (!skip) {
PreCallRecordCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
lock.unlock();
device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
skip = PreCallValidateCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
if (!skip) {
PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
lock.unlock();
device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
&buffer_state, "vkCmdDrawIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
lock.lock();
PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
if (!skip) {
PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
lock.unlock();
dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
skip = PreCallValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
if (!skip) {
PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
lock.unlock();
device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
skip = PreCallValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
if (!skip) {
PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
lock.unlock();
device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
if (!skip) {
PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
lock.unlock();
device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
}
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
{
lock_guard_t lock(global_lock);
skip = PreCallValidateCmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
if (!skip) {
PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
lock.unlock();
dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}
}
VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
if (!skip) {
PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
lock.unlock();
dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}
}
VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip =
PreCallValidateCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
if (!skip) {
PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
lock.unlock();
dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetImageSubresourceLayout(device, image, pSubresource, pLayout);
if (!skip) {
lock.unlock();
device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
}
}
VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetEvent(dev_data, pCB, stageMask);
PreCallRecordCmdSetEvent(dev_data, pCB, commandBuffer, event, stageMask);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}
VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdResetEvent(dev_data, pCB, stageMask);
PreCallRecordCmdResetEvent(dev_data, pCB, commandBuffer, event);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}
VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdEventCount(dev_data, cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
if (!skip) {
PreCallRecordCmdWaitEvents(dev_data, cb_state, eventCount, pEvents, sourceStageMask, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
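    // Re-acquire the lock and record the event/barrier state observed by this command buffer.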
lock.lock();
PostCallRecordCmdWaitEvents(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (!skip) {
PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
} else {
assert(0);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
}
VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
        skip |= PreCallValidateCmdBeginQuery(dev_data, pCB, queryPool, flags);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
lock.lock();
if (pCB) {
PostCallRecordCmdBeginQuery(dev_data, queryPool, slot, pCB);
}
}
VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
QueryObject query = {queryPool, slot};
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool, slot);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
lock.lock();
if (cb_state) {
PostCallRecordCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool);
}
}
VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
bool skip = PreCallValidateCmdResetQueryPool(dev_data, cb_state);
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
lock.lock();
PostCallRecordCmdResetQueryPool(dev_data, cb_state, commandBuffer, queryPool, firstQuery, queryCount);
}
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
if (cb_node && dst_buff_state) {
skip |= PreCallValidateCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
flags);
lock.lock();
if (cb_node && dst_buff_state) {
PostCallRecordCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state, queryPool, firstQuery, queryCount);
}
}
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
uint32_t offset, uint32_t size, const void *pValues) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdPushConstants(dev_data, commandBuffer, layout, stageFlags, offset, size);
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdWriteTimestamp(dev_data, cb_state);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
lock.lock();
if (cb_state) PostCallRecordCmdWriteTimestamp(cb_state, commandBuffer, queryPool, slot);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
lock.lock();
PostCallRecordCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
skip = PreCallValidateCreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass, result);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
skip = PreCallValidateCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.CreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass);
lock.lock();
PostCallRecordCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass, result);
return result;
}
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdBeginRenderPass(dev_data, cb_state, RENDER_PASS_VERSION_1, pRenderPassBegin);
if (!skip) {
PreCallRecordCmdBeginRenderPass(dev_data, cb_state, pRenderPassBegin, contents);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
}
}
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdBeginRenderPass(dev_data, cb_state, RENDER_PASS_VERSION_2, pRenderPassBegin);
if (!skip) {
PreCallRecordCmdBeginRenderPass(dev_data, cb_state, pRenderPassBegin, pSubpassBeginInfo->contents);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdNextSubpass(dev_data, pCB, RENDER_PASS_VERSION_1, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
if (pCB) {
lock.lock();
PostCallRecordCmdNextSubpass(dev_data, pCB, contents);
}
}
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdNextSubpass(dev_data, pCB, RENDER_PASS_VERSION_2, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
if (pCB) {
lock.lock();
PostCallRecordCmdNextSubpass(dev_data, pCB, pSubpassBeginInfo->contents);
}
}
VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdEndRenderPass(dev_data, pCB, RENDER_PASS_VERSION_1, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
if (pCB) {
lock.lock();
PostCallRecordCmdEndRenderPass(dev_data, pCB);
}
}
VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdEndRenderPass(dev_data, pCB, RENDER_PASS_VERSION_2, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
if (pCB) {
lock.lock();
PostCallRecordCmdEndRenderPass(dev_data, pCB);
}
}
VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
// TODO: State changes needs to be untangled from validation in PreCallValidationCmdExecuteCommands()
skip |= PreCallValidateCmdExecuteCommands(dev_data, cb_state, commandBuffer, commandBuffersCount, pCommandBuffers);
PreCallRecordCmdExecuteCommands(dev_data, cb_state, commandBuffersCount, pCommandBuffers);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
void **ppData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateMapMemory(device, mem, offset, size, flags, ppData);
lock.unlock();
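    // The result defaults to VK_ERROR_VALIDATION_FAILED_EXT and is only overwritten if the call is forwarded.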
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
lock.lock();
PostCallRecordMapMemory(device, mem, offset, size, flags, ppData, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
unique_lock_t lock(global_lock);
skip |= PreCallValidateUnmapMemory(device, mem);
PreCallRecordUnmapMemory(device, mem);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.UnmapMemory(device, mem);
}
}
VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateInvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
lock.lock();
PostCallRecordInvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateBindImageMemory(device, image, mem, memoryOffset);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
lock.lock();
PostCallRecordBindImageMemory(device, image, mem, memoryOffset, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateBindImageMemory2(device, bindInfoCount, pBindInfos);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
lock.lock();
PostCallRecordBindImageMemory2(device, bindInfoCount, pBindInfos, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateBindImageMemory2KHR(device, bindInfoCount, pBindInfos);
lock.unlock();
if (!skip) {
result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
lock.lock();
PostCallRecordBindImageMemory2KHR(device, bindInfoCount, pBindInfos, result);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateSetEvent(dev_data, event);
PreCallRecordSetEvent(dev_data, event);
lock.unlock();
if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
lock.lock();
PostCallRecordQueueBindSparse(queue, bindInfoCount, pBindInfo, fence, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
lock_guard_t lock(global_lock);
PostCallRecordCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore, result);
return result;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL
ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip =
PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
pImportSemaphoreFdInfo->flags);
}
return result;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
unique_lock_t lock(global_lock);
PostCallRecordGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, result);
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
unique_lock_t lock(global_lock);
PostCallRecordGetSemaphoreFdKHR(device, pGetFdInfo, pFd, result);
return result;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
pImportFenceWin32HandleInfo->flags);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
}
return result;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
unique_lock_t lock(global_lock);
PostCallRecordGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, result);
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
if (result == VK_SUCCESS) {
PostCallRecordGetFenceFdKHR(device, pGetFdInfo, pFd, result);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
lock_guard_t lock(global_lock);
PostCallRecordCreateEvent(device, pCreateInfo, pAllocator, pEvent, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
unique_lock_t lock(global_lock);
skip = PreCallValidateCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
lock.unlock();
VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
lock.lock();
PostCallRecordCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain, result);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
lock.unlock();
dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
lock.unlock();
if (!skip) {
result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
}
lock.lock();
PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueuePresentKHR(queue, pPresentInfo);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
lock.lock();
PostCallRecordQueuePresentKHR(queue, pPresentInfo, result);
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
unique_lock_t lock(global_lock);
skip = PreCallValidateCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
lock.unlock();
VkResult result =
dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
lock.lock();
PostCallRecordCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains, result);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCommonAcquireNextImage(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR");
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
lock.lock();
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
PostCallRecordCommonAcquireNextImage(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip =
PreCallValidateCommonAcquireNextImage(dev_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout,
pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR");
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
lock.lock();
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
PostCallRecordCommonAcquireNextImage(dev_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout,
pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex);
// TODO: consider physical device masks
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
VkPhysicalDevice *pPhysicalDevices) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
assert(instance_data);
unique_lock_t lock(global_lock);
// For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
if (pPhysicalDevices) {
skip |= PreCallValidateEnumeratePhysicalDevices(instance_data, pPhysicalDeviceCount);
}
PreCallRecordEnumeratePhysicalDevices(instance_data);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
lock.lock();
PostCallRecordEnumeratePhysicalDevices(instance_data, result, pPhysicalDeviceCount, pPhysicalDevices);
return result;
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties *pPhysicalDeviceProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceProperties(physicalDevice, pPhysicalDeviceProperties);
if (instance_data->enabled.gpu_validation && instance_data->enabled.gpu_validation_reserve_binding_slot) {
if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) {
pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1;
} else {
log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "UNASSIGNED-GPU-Assisted Validation Setup Error.",
"Unable to reserve descriptor binding slot on a device with only one slot.");
}
}
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySurfaceKHR(instance, surface, pAllocator);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordValidateDestroySurfaceKHR(instance, surface, pAllocator);
lock.unlock();
instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateDisplayPlaneSurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateAndroidSurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateIOSSurfaceMVK)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateMacOSSurfaceMVK)(instance, pCreateInfo, pAllocator, pSurface);
if (result == VK_SUCCESS) {
unique_lock_t lock(global_lock);
PostCallRecordCreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface, result);
}
return result;
}
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateWaylandSurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result =
instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
return result;
}
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateWin32SurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
return result;
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateXcbSurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
connection, visual_id);
return result;
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = (instance_data->dispatch_table.CreateXlibSurfaceKHR)(instance, pCreateInfo, pAllocator, pSurface);
unique_lock_t lock(global_lock);
PostCallRecordCreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, result);
return result;
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result =
instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
return result;
}
#endif // VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
unique_lock_t lock(global_lock);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(instance_data, physicalDevice, pSurfaceCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) {
bool skip = false;
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
skip |= PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(instance_data, pd_state, queueFamilyIndex);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
auto result =
instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(instance_data, physicalDevice, queueFamilyIndex, surface, pSupported);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
pPresentModes);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
unique_lock_t lock(global_lock);
PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(instance_data, physicalDevice, pPresentModeCount, pPresentModes);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats) {
bool skip = false;
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
if (pSurfaceFormats) {
skip |= PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(instance_data, physical_device_state, call_state, physicalDevice,
pSurfaceFormatCount);
}
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
// Call down the call chain:
auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
pSurfaceFormats);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
lock.lock();
PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(physical_device_state, call_state, pSurfaceFormatCount, pSurfaceFormats);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
pSurfaceFormatCount, pSurfaceFormats);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
PreCallRecordSetDebugUtilsObjectNameEXT(dev_data, pNameInfo);
if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordQueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
}
lock_guard_t lock(global_lock);
PostCallRecordQueueEndDebugUtilsLabelEXT(queue);
}
VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordQueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordCmdBeginDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
}
lock_guard_t lock(global_lock);
PostCallRecordCmdEndDebugUtilsLabelEXT(dev_data, commandBuffer);
}
VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordCmdInsertDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugUtilsMessengerEXT *pMessenger) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
std::unique_lock<std::mutex> lock(global_lock);
PostCallRecordCreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger, result);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
std::unique_lock<std::mutex> lock(global_lock);
PostCallRecordDestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
lock.unlock();
}
VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageTypes,
const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pMsgCallback) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
lock_guard_t lock(global_lock);
PostCallRecordCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback, result);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
lock_guard_t lock(global_lock);
PostCallDestroyDebugReportCallbackEXT(instance_data, msgCallback, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
pPhysicalDeviceGroupProperties);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
pPhysicalDeviceGroupProperties);
if (result == VK_SUCCESS) {
PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
lock.unlock();
result =
device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
lock.lock();
PostCallRecordCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, result);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
lock.unlock();
result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
pDescriptorUpdateTemplate);
lock.lock();
PostCallRecordCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, result);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
lock.unlock();
device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
lock.unlock();
device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
skip = PreCallValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
if (!skip) {
PreCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
lock.unlock();
device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
}
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
skip = PreCallValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
if (!skip) {
PreCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
lock.unlock();
device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
}
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set, const void *pData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |=
PreCallValidateCmdPushDescriptorSetWithTemplateKHR(dev_data, cb_state, descriptorUpdateTemplate, layout, set, pData);
if (!skip) {
PreCallRecordCmdPushDescriptorSetWithTemplateKHR(dev_data, cb_state, descriptorUpdateTemplate, layout, set, pData);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPlanePropertiesKHR *pProperties) {
VkResult result = VK_SUCCESS;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pPropertyCount,
VkDisplayPlaneProperties2KHR *pProperties) {
VkResult result = VK_SUCCESS;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
lock.unlock();
if (!skip) {
result =
instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
lock.unlock();
if (!skip) {
result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities);
lock.unlock();
if (!skip) {
result = instance_data->dispatch_table.GetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
unique_lock_t lock(global_lock);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
PreCallRecordDebugMarkerSetObjectNameEXT(device_data, pNameInfo);
lock.unlock();
VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
return result;
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdDebugMarkerEndEXT(device_data, cb_state);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
}
}
VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdSetDiscardRectangleEXT(dev_data, cb_state);
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
pDiscardRectangles);
}
}
VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdSetSampleLocationsEXT(dev_data, cb_state);
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
BUFFER_STATE *count_buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndirectCountKHR(dev_data, commandBuffer, buffer, offset, countBuffer, countBufferOffset,
stride, &cb_state, &buffer_state, &count_buffer_state, false,
VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirectCountKHR()");
if (!skip) {
PreCallRecordCmdDrawIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state, count_buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
BUFFER_STATE *count_buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndexedIndirectCountKHR(
dev_data, commandBuffer, buffer, offset, countBuffer, countBufferOffset, stride, &cb_state, &buffer_state,
&count_buffer_state, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirectCountKHR()");
if (!skip) {
PreCallRecordCmdDrawIndexedIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state,
count_buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
bool skip = false;
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdDrawMeshTasksNV(dev_data, commandBuffer, /* indexed */ false, VK_PIPELINE_BIND_POINT_GRAPHICS,
&cb_state, "vkCmdDrawMeshTasksNV()");
if (!skip) {
PreCallRecordCmdDrawMeshTasksNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
lock.unlock();
dev_data->dispatch_table.CmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t drawCount, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
bool skip = false;
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdDrawMeshTasksIndirectNV(dev_data, commandBuffer, buffer, /* indexed */ false,
VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, &buffer_state,
"vkCmdDrawMeshTasksIndirectNV()");
if (!skip) {
PreCallRecordCmdDrawMeshTasksIndirectNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
BUFFER_STATE *count_buffer_state = nullptr;
bool skip = false;
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdDrawMeshTasksIndirectCountNV(dev_data, commandBuffer, buffer, countBuffer, /* indexed */ false,
VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, &buffer_state,
&count_buffer_state, "vkCmdDrawMeshTasksIndirectCountNV()");
if (!skip) {
PreCallRecordCmdDrawMeshTasksIndirectCountNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state,
count_buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
}
}
VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.DestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator);
unique_lock_t lock(global_lock);
PostCallRecordDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator);
lock.unlock();
}
VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.DestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator);
unique_lock_t lock(global_lock);
PostCallRecordDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator);
lock.unlock();
}
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
// The order of operations here is a little convoluted but gets the job done
// 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
// 2. Create state is then validated (which uses flags setup during shadowing)
// 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
std::vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateGraphicsPipelines(dev_data, &pipe_state, count, pCreateInfos);
if (skip) {
for (uint32_t i = 0; i < count; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
// GPU Validation can possibly replace instrumented shaders with non-instrumented ones, so give it a chance to modify the create
// infos.
std::vector<safe_VkGraphicsPipelineCreateInfo> gpu_create_infos;
if (GetEnables(dev_data)->gpu_validation) {
gpu_create_infos = GpuPreCallRecordCreateGraphicsPipelines(dev_data, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, pipe_state);
pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(gpu_create_infos.data());
}
lock.unlock();
auto result =
dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
lock.lock();
PostCallRecordCreateGraphicsPipelines(dev_data, &pipe_state, count, pCreateInfos, pAllocator, pPipelines);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result;
bool skip = PreCallValidateCreatePipelineLayout(dev_data, pCreateInfo);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
if (GetEnables(dev_data)->gpu_validation) {
unique_lock_t lock(global_lock);
result = GpuOverrideDispatchCreatePipelineLayout(dev_data, pCreateInfo, pAllocator, pPipelineLayout);
} else {
result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
}
if (VK_SUCCESS == result) {
PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdBindPipeline(dev_data, cb_state);
PreCallRecordCmdBindPipeline(dev_data, cb_state, pipelineBindPoint, pipeline);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
if (GetEnables(dev_data)->gpu_validation) {
lock.lock();
// Bind the debug descriptor set immediately after binding the pipeline.
GpuPostCallDispatchCmdBindPipeline(dev_data, commandBuffer, pipelineBindPoint, pipeline);
lock.unlock();
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool is_spirv;
bool spirv_valid;
VkResult result;
uint32_t unique_shader_id = 0;
if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &is_spirv, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
if (GetEnables(dev_data)->gpu_validation) {
result = GpuOverrideDispatchCreateShaderModule(dev_data, pCreateInfo, pAllocator, pShaderModule, &unique_shader_id);
} else {
result = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
}
if (result == VK_SUCCESS) {
lock_guard_t lock(global_lock);
PostCallRecordCreateShaderModule(dev_data, is_spirv, pCreateInfo, pShaderModule, unique_shader_id);
}
return result;
}
VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetBufferDeviceAddressEXT(dev_data, pInfo);
if (!skip) {
lock.unlock();
return dev_data->dispatch_table.GetBufferDeviceAddressEXT(device, pInfo);
}
return 0;
}
VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueSubmit(queue, submitCount, pSubmits, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
lock.lock();
PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
lock.unlock();
return result;
}
// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
{"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
{"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
{"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
{"vkCreateInstance", (void *)CreateInstance},
{"vkCreateDevice", (void *)CreateDevice},
{"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
{"vkGetPhysicalDeviceProperties", (void *)GetPhysicalDeviceProperties},
{"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
{"vkDestroyInstance", (void *)DestroyInstance},
{"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
{"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
{"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
{"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
{"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
{"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
{"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
{"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
{"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
{"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
{"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
{"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
{"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
{"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
{"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
{"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
{"vkAcquireNextImage2KHR", (void *)AcquireNextImage2KHR},
{"vkQueuePresentKHR", (void *)QueuePresentKHR},
{"vkQueueSubmit", (void *)QueueSubmit},
{"vkWaitForFences", (void *)WaitForFences},
{"vkGetFenceStatus", (void *)GetFenceStatus},
{"vkQueueWaitIdle", (void *)QueueWaitIdle},
{"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
{"vkGetDeviceQueue", (void *)GetDeviceQueue},
{"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
{"vkDestroyDevice", (void *)DestroyDevice},
{"vkDestroyFence", (void *)DestroyFence},
{"vkResetFences", (void *)ResetFences},
{"vkDestroySemaphore", (void *)DestroySemaphore},
{"vkDestroyEvent", (void *)DestroyEvent},
{"vkDestroyQueryPool", (void *)DestroyQueryPool},
{"vkDestroyBuffer", (void *)DestroyBuffer},
{"vkDestroyBufferView", (void *)DestroyBufferView},
{"vkDestroyImage", (void *)DestroyImage},
{"vkDestroyImageView", (void *)DestroyImageView},
{"vkDestroyShaderModule", (void *)DestroyShaderModule},
{"vkDestroyPipeline", (void *)DestroyPipeline},
{"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
{"vkDestroySampler", (void *)DestroySampler},
{"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
{"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
{"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
{"vkDestroyRenderPass", (void *)DestroyRenderPass},
{"vkCreateBuffer", (void *)CreateBuffer},
{"vkCreateBufferView", (void *)CreateBufferView},
{"vkCreateImage", (void *)CreateImage},
{"vkCreateImageView", (void *)CreateImageView},
{"vkCreateFence", (void *)CreateFence},
{"vkCreatePipelineCache", (void *)CreatePipelineCache},
{"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
{"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
{"vkMergePipelineCaches", (void *)MergePipelineCaches},
{"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
{"vkCreateComputePipelines", (void *)CreateComputePipelines},
{"vkCreateSampler", (void *)CreateSampler},
{"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
{"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
{"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
{"vkResetDescriptorPool", (void *)ResetDescriptorPool},
{"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
{"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
{"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
{"vkCreateCommandPool", (void *)CreateCommandPool},
{"vkDestroyCommandPool", (void *)DestroyCommandPool},
{"vkResetCommandPool", (void *)ResetCommandPool},
{"vkCreateQueryPool", (void *)CreateQueryPool},
{"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
{"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
{"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
{"vkEndCommandBuffer", (void *)EndCommandBuffer},
{"vkResetCommandBuffer", (void *)ResetCommandBuffer},
{"vkCmdBindPipeline", (void *)CmdBindPipeline},
{"vkCmdSetViewport", (void *)CmdSetViewport},
{"vkCmdSetScissor", (void *)CmdSetScissor},
{"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
{"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
{"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
{"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
{"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
{"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
{"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
{"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
{"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
{"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
{"vkCmdDraw", (void *)CmdDraw},
{"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
{"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
{"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
{"vkCmdDispatch", (void *)CmdDispatch},
{"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
{"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
{"vkCmdCopyImage", (void *)CmdCopyImage},
{"vkCmdBlitImage", (void *)CmdBlitImage},
{"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
{"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
{"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
{"vkCmdFillBuffer", (void *)CmdFillBuffer},
{"vkCmdClearColorImage", (void *)CmdClearColorImage},
{"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
{"vkCmdClearAttachments", (void *)CmdClearAttachments},
{"vkCmdResolveImage", (void *)CmdResolveImage},
{"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
{"vkCmdSetEvent", (void *)CmdSetEvent},
{"vkCmdResetEvent", (void *)CmdResetEvent},
{"vkCmdWaitEvents", (void *)CmdWaitEvents},
{"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
{"vkCmdBeginQuery", (void *)CmdBeginQuery},
{"vkCmdEndQuery", (void *)CmdEndQuery},
{"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
{"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
{"vkCmdPushConstants", (void *)CmdPushConstants},
{"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
{"vkCreateFramebuffer", (void *)CreateFramebuffer},
{"vkCreateShaderModule", (void *)CreateShaderModule},
{"vkCreateRenderPass", (void *)CreateRenderPass},
{"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
{"vkCmdNextSubpass", (void *)CmdNextSubpass},
{"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
{"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
{"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
{"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
{"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
{"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
{"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
{"vkSetEvent", (void *)SetEvent},
{"vkMapMemory", (void *)MapMemory},
{"vkUnmapMemory", (void *)UnmapMemory},
{"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
{"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
{"vkAllocateMemory", (void *)AllocateMemory},
{"vkFreeMemory", (void *)FreeMemory},
{"vkBindBufferMemory", (void *)BindBufferMemory},
{"vkBindBufferMemory2", (void *)BindBufferMemory2},
{"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
{"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
{"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
{"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
{"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
{"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
{"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
{"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
{"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
{"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
{"vkGetPhysicalDeviceImageFormatProperties2", (void *)GetPhysicalDeviceImageFormatProperties2},
{"vkGetPhysicalDeviceImageFormatProperties2KHR", (void *)GetPhysicalDeviceImageFormatProperties2KHR},
{"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
{"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
{"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
{"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
{"vkBindImageMemory", (void *)BindImageMemory},
{"vkBindImageMemory2", (void *)BindImageMemory2},
{"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
{"vkQueueBindSparse", (void *)QueueBindSparse},
{"vkCreateSemaphore", (void *)CreateSemaphore},
{"vkCreateEvent", (void *)CreateEvent},
{"vkCreateSamplerYcbcrConversion", (void *)CreateSamplerYcbcrConversion},
{"vkCreateSamplerYcbcrConversionKHR", (void *)CreateSamplerYcbcrConversionKHR},
{"vkDestroySamplerYcbcrConversion", (void *)DestroySamplerYcbcrConversion},
{"vkDestroySamplerYcbcrConversionKHR", (void *)DestroySamplerYcbcrConversionKHR},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
{"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
{"vkGetAndroidHardwareBufferPropertiesANDROID", (void *)GetAndroidHardwareBufferPropertiesANDROID},
{"vkGetMemoryAndroidHardwareBufferANDROID", (void *)GetMemoryAndroidHardwareBufferANDROID},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
{"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
{"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
{"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
{"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
{"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
{"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
{"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
{"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
{"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
{"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
{"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
{"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
{"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
{"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
#endif
{"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
{"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
{"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
{"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
{"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
{"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
{"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
{"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
{"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
{"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
{"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
{"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
{"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
{"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
{"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
{"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
{"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
{"vkGetPhysicalDeviceDisplayPlaneProperties2KHR", (void *)GetPhysicalDeviceDisplayPlaneProperties2KHR},
{"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
{"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
{"vkGetDisplayPlaneCapabilities2KHR", (void *)GetDisplayPlaneCapabilities2KHR},
{"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
{"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
{"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
{"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
{"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
{"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
{"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
{"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
{"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
{"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
{"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
{"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
{"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
{"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
{"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
{"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
{"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
{"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
{"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
{"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
{"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
{"vkCmdDrawIndirectCountKHR", (void *)CmdDrawIndirectCountKHR},
{"vkCmdDrawIndexedIndirectCountKHR", (void *)CmdDrawIndexedIndirectCountKHR},
{"vkCmdSetExclusiveScissorNV", (void *)CmdSetExclusiveScissorNV},
{"vkCmdBindShadingRateImageNV", (void *)CmdBindShadingRateImageNV},
{"vkCmdSetViewportShadingRatePaletteNV", (void *)CmdSetViewportShadingRatePaletteNV},
{"vkCmdDrawMeshTasksNV", (void *)CmdDrawMeshTasksNV},
{"vkCmdDrawMeshTasksIndirectNV", (void *)CmdDrawMeshTasksIndirectNV},
{"vkCmdDrawMeshTasksIndirectCountNV", (void *)CmdDrawMeshTasksIndirectCountNV},
{"vkCreateRayTracingPipelinesNV", (void *)CreateRayTracingPipelinesNV},
{"vkCreateRenderPass2KHR", (void *)CreateRenderPass2KHR},
{"vkCmdBeginRenderPass2KHR", (void *)CmdBeginRenderPass2KHR},
{"vkCmdNextSubpass2KHR", (void *)CmdNextSubpass2KHR},
{"vkCmdEndRenderPass2KHR", (void *)CmdEndRenderPass2KHR},
{"vkGetBufferDeviceAddressEXT", (void *)GetBufferDeviceAddressEXT},
};
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
assert(device);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ApiParentExtensionEnabled(funcName, device_data->extensions.device_extension_set)) {
return nullptr;
}
// Is API to be intercepted by this layer?
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
auto &table = device_data->dispatch_table;
if (!table.GetDeviceProcAddr) return nullptr;
return table.GetDeviceProcAddr(device, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
instance_layer_data *instance_data;
// Is API to be intercepted by this layer?
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
auto &table = instance_data->dispatch_table;
if (!table.GetInstanceProcAddr) return nullptr;
return table.GetInstanceProcAddr(instance, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
assert(instance);
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
auto &table = instance_data->dispatch_table;
if (!table.GetPhysicalDeviceProcAddr) return nullptr;
return table.GetPhysicalDeviceProcAddr(instance, funcName);
}
} // namespace core_validation
// loader-layer interface v0, just wrappers since there is only a layer
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return core_validation::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return core_validation::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
const char *funcName) {
return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
}
if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
} else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
}
return VK_SUCCESS;
}
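// Illustrative usage sketch (hypothetical, for clarity only; not part of the layer itself):
// how a loader-side caller might drive the negotiation entry point defined above.
static VkResult ExampleNegotiateWithThisLayer() {
    VkNegotiateLayerInterface negotiate = {};
    negotiate.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
    negotiate.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    VkResult result = vkNegotiateLoaderLayerInterfaceVersion(&negotiate);
    if (result == VK_SUCCESS && negotiate.loaderLayerInterfaceVersion >= 2) {
        // For interface version 2 and up the layer has filled in its proc-addr entry points.
        assert(negotiate.pfnGetInstanceProcAddr != nullptr);
        assert(negotiate.pfnGetDeviceProcAddr != nullptr);
    }
    return result;
}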
| 1 | 9,929 | We could delay this look-up now and only do it in the non-skip case. I suspect this is true for (almost) all of the top-level refactored functions. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -144,6 +144,15 @@ std::string MetaServiceUtils::schemaTagKey(GraphSpaceID spaceId, TagID tagId, in
return key;
}
+std::string MetaServiceUtils::schemaTagPrefix(GraphSpaceID spaceId, TagID tagId) {
+ std::string key;
+ key.reserve(kTagsTable.size() + sizeof(GraphSpaceID) + sizeof(TagID));
+ key.append(kTagsTable.data(), kTagsTable.size());
+ key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
+ key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
+ return key;
+}
+
std::string MetaServiceUtils::schemaTagsPrefix(GraphSpaceID spaceId) {
std::string key;
key.reserve(kTagsTable.size() + sizeof(GraphSpaceID)); | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/MetaServiceUtils.h"
#include <thrift/lib/cpp2/protocol/Serializer.h>
#include <thrift/lib/cpp2/protocol/CompactProtocol.h>
namespace nebula {
namespace meta {
const std::string kSpacesTable = "__spaces__"; // NOLINT
const std::string kPartsTable = "__parts__"; // NOLINT
const std::string kHostsTable = "__hosts__"; // NOLINT
const std::string kTagsTable = "__tags__"; // NOLINT
const std::string kEdgesTable = "__edges__"; // NOLINT
const std::string kIndexTable = "__index__"; // NOLINT
std::string MetaServiceUtils::spaceKey(GraphSpaceID spaceId) {
std::string key;
key.reserve(256);
key.append(kSpacesTable.data(), kSpacesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
return key;
}
std::string MetaServiceUtils::spaceVal(int32_t partsNum,
int32_t replicaFactor,
const std::string& name) {
std::string val;
val.reserve(256);
val.append(reinterpret_cast<const char*>(&partsNum), sizeof(partsNum));
val.append(reinterpret_cast<const char*>(&replicaFactor), sizeof(replicaFactor));
val.append(name);
return val;
}
const std::string& MetaServiceUtils::spacePrefix() {
return kSpacesTable;
}
GraphSpaceID MetaServiceUtils::spaceId(folly::StringPiece rawKey) {
return *reinterpret_cast<const GraphSpaceID*>(rawKey.data() + kSpacesTable.size());
}
folly::StringPiece MetaServiceUtils::spaceName(folly::StringPiece rawVal) {
return rawVal.subpiece(sizeof(int32_t)*2);
}
std::string MetaServiceUtils::partKey(GraphSpaceID spaceId, PartitionID partId) {
std::string key;
key.reserve(128);
key.append(kPartsTable.data(), kPartsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(reinterpret_cast<const char*>(&partId), sizeof(PartitionID));
return key;
}
std::string MetaServiceUtils::partVal(const std::vector<nebula::cpp2::HostAddr>& hosts) {
std::string val;
val.reserve(128);
for (auto& h : hosts) {
val.append(reinterpret_cast<const char*>(&h.ip), sizeof(h.ip));
val.append(reinterpret_cast<const char*>(&h.port), sizeof(h.port));
}
return val;
}
std::string MetaServiceUtils::partPrefix(GraphSpaceID spaceId) {
std::string prefix;
prefix.reserve(128);
prefix.append(kPartsTable.data(), kPartsTable.size());
prefix.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
return prefix;
}
std::vector<nebula::cpp2::HostAddr> MetaServiceUtils::parsePartVal(folly::StringPiece val) {
std::vector<nebula::cpp2::HostAddr> hosts;
static const size_t unitSize = sizeof(int32_t) * 2;
auto hostsNum = val.size() / unitSize;
hosts.reserve(hostsNum);
VLOG(3) << "Total size:" << val.size()
<< ", host size:" << unitSize
<< ", host num:" << hostsNum;
for (decltype(hostsNum) i = 0; i < hostsNum; i++) {
nebula::cpp2::HostAddr h;
h.set_ip(*reinterpret_cast<const int32_t*>(val.data() + i * unitSize));
h.set_port(*reinterpret_cast<const int32_t*>(val.data() + i * unitSize + sizeof(int32_t)));
hosts.emplace_back(std::move(h));
}
return hosts;
}
std::string MetaServiceUtils::hostKey(IPv4 ip, Port port) {
std::string key;
key.reserve(128);
key.append(kHostsTable.data(), kHostsTable.size());
key.append(reinterpret_cast<const char*>(&ip), sizeof(ip));
key.append(reinterpret_cast<const char*>(&port), sizeof(port));
return key;
}
std::string MetaServiceUtils::hostVal() {
return "";
}
const std::string& MetaServiceUtils::hostPrefix() {
return kHostsTable;
}
nebula::cpp2::HostAddr MetaServiceUtils::parseHostKey(folly::StringPiece key) {
nebula::cpp2::HostAddr host;
memcpy(&host, key.data() + kHostsTable.size(), sizeof(host));
return host;
}
std::string MetaServiceUtils::schemaEdgeKey(GraphSpaceID spaceId,
EdgeType edgeType,
int64_t version) {
std::string key;
key.reserve(128);
key.append(kEdgesTable.data(), kEdgesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&edgeType), sizeof(edgeType));
key.append(reinterpret_cast<const char*>(&version), sizeof(version));
return key;
}
std::string MetaServiceUtils::schemaEdgeVal(nebula::cpp2::Schema schema) {
std::string val;
apache::thrift::CompactSerializer::serialize(schema, &val);
return val;
}
std::string MetaServiceUtils::schemaTagKey(GraphSpaceID spaceId, TagID tagId, int64_t version) {
std::string key;
key.reserve(128);
key.append(kTagsTable.data(), kTagsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
key.append(reinterpret_cast<const char*>(&version), sizeof(version));
return key;
}
std::string MetaServiceUtils::schemaTagsPrefix(GraphSpaceID spaceId) {
std::string key;
key.reserve(kTagsTable.size() + sizeof(GraphSpaceID));
key.append(kTagsTable.data(), kTagsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
return key;
}
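// Illustrative sketch, not part of this file as-is: the change under review adds a
// schemaTagPrefix(spaceId, tagId) helper shaped like schemaTagKey above, minus the version
// suffix. Per the review suggestion, reserving a simple fixed length (as the other key
// builders here do with reserve(128)) keeps it simple; computing the exact byte count also works.
static std::string schemaTagPrefixSketch(GraphSpaceID spaceId, TagID tagId) {
    std::string key;
    key.reserve(128);
    key.append(kTagsTable.data(), kTagsTable.size());
    key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
    return key;
}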
std::string MetaServiceUtils::schemaTagVal(const std::string& name, nebula::cpp2::Schema schema) {
int32_t len = name.size();
std::string val, sval;
apache::thrift::CompactSerializer::serialize(schema, &sval);
val.reserve(sizeof(int32_t) + name.size() + sval.size());
val.append(reinterpret_cast<const char*>(&len), sizeof(int32_t));
val.append(name);
val.append(sval);
return val;
}
nebula::cpp2::Schema MetaServiceUtils::parseSchema(folly::StringPiece rawData) {
nebula::cpp2::Schema schema;
int32_t offset = sizeof(int32_t) + *reinterpret_cast<const int32_t *>(rawData.begin());
auto schval = rawData.subpiece(offset, rawData.size() - offset);
apache::thrift::CompactSerializer::deserialize(schval, schema);
return schema;
}
std::string MetaServiceUtils::indexKey(EntryType type, const std::string& name) {
std::string key;
key.reserve(128);
key.append(kIndexTable.data(), kIndexTable.size());
key.append(reinterpret_cast<const char*>(&type), sizeof(type));
key.append(name);
return key;
}
std::string MetaServiceUtils::assembleSegmentKey(const std::string& segment,
const std::string& key) {
std::string segmentKey;
segmentKey.reserve(64);
segmentKey.append(segment);
segmentKey.append(key.data(), key.size());
return segmentKey;
}
} // namespace meta
} // namespace nebula
| 1 | 16,132 | Typically, to be simple, we could reserve a const length space. | vesoft-inc-nebula | cpp |
@@ -0,0 +1,2 @@
+from .reader import *
+from .builder import Builder | 1 | 1 | 9,547 | Need to add a license and copyright header to each file. | google-flatbuffers | java |
|
@@ -7,6 +7,8 @@
<% end %>
<%= form.inputs do %>
+ <%= hidden_field_tag "coupon_id" %>
+
<% if signed_out? %>
<ul class="checkout-signin-signup-toggle">
<li> | 1 | <%= semantic_form_for checkout, url: checkouts_path(checkout.plan), html: { method: 'post' } do |form| %>
<%= form.semantic_errors %>
<% if signed_in? %>
<h2 class="one-step-away">Hey <%= current_user.first_name %>, you're one step away. Enter payment below to start learning with Upcase now!</h2>
<% end %>
<%= form.inputs do %>
<% if signed_out? %>
<ul class="checkout-signin-signup-toggle">
<li>
<%= link_to "Already have an account? Sign in",
sign_in_path(return_to: request.fullpath), class: "cta-button secondary-button" %>
</li>
<li><%= link_to "Sign up with GitHub", authenticated_on_checkout_path(plan: checkout.plan), class: "cta-button secondary-button" %></li>
</ul>
<% end %>
<%= hidden_field_tag "coupon_id" %>
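    <%# Placement note (flagged in the review of the related change): this coupon_id field sits outside the "if signed_out?" block above, so it is rendered for signed-in users as well as for new sign-ups. %>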
<% if signed_out? %>
<%= form.input :name, required: true %>
<%= form.input :email, as: :email, required: true %>
<%= form.input :password, required: true %>
<% end %>
<% if checkout.needs_github_username? %>
<%= form.input :github_username, required: true, label: "GitHub username",
hint: "Be sure to enter a valid, unique GitHub username. Organizations are not allowed." %>
<% end %>
<% end %>
<%= form.inputs id: 'billing-information' do %>
<h3>Secure Credit Card Payment</h3>
<li id="credit-card-icons">
<%= image_tag "icons/visa.png" %>
<%= image_tag "icons/master.png" %>
<%= image_tag "icons/american_express.png" %>
<%= image_tag "icons/discover.png" %>
</li>
<li class="payment-errors"></li>
<li id="checkout_cc_input" class="stripe">
<label for='card-number'>Card Number</label>
<input type='text' size='20' autocomplete='off' id='card-number' class='card-number'/>
</li>
<li id="checkout_expiration_input" class="stripe">
<label>Expiration</label>
<%= select_month nil, { prompt: 'Month', add_month_numbers: true }, class: 'card-expiry-month' %>
<%= select_year nil, { prompt: 'Year', start_year: Time.zone.today.year, end_year: 10.years.from_now.year }, class: 'card-expiry-year' %>
</li>
<li id="checkout_cvc_input" class="stripe">
<label for='card-cvc'>CVC</label>
<input type='text' size='4' autocomplete='off' id='card-cvc' class='card-cvc'/>
</li>
<% end %>
<%= form.inputs class: 'address-info' do %>
<%= render 'users/address_fields', form: form %>
<% end %>
<%= form.actions do %>
<%= form.action :submit, label: "Submit Payment — #{submit_amount(checkout)}".html_safe,
button_html: { class: "subscribe-cta light-bg" } %>
<% end %>
<% end %>
<%= render partial: 'shared/stripe' %>
| 1 | 16,984 | If I'm not mistaken, this line is now outside of the `if signed_out?` block, right? Any concerns about that? Seems odd that we wouldn't accept coupons for signed in users, but I want to make sure we understand the ramifications of this change. | thoughtbot-upcase | rb |
@@ -130,7 +130,7 @@ public abstract class PostgrePrivilege implements DBAPrivilege, Comparable<Postg
@NotNull
@Override
- public DBPDataSource getDataSource() {
+ public PostgreDataSource getDataSource() {
return owner.getDataSource();
}
| 1 | /*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.postgresql.model;
import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.model.DBPDataSource;
import org.jkiss.dbeaver.model.access.DBAPrivilege;
import org.jkiss.dbeaver.model.access.DBAPrivilegeGrant;
import org.jkiss.dbeaver.model.access.DBARole;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSObject;
import java.util.List;
/**
* PostgrePrivilege
*/
public abstract class PostgrePrivilege implements DBAPrivilege, Comparable<PostgrePrivilege> {
public static final short NONE = 0;
public static final short GRANTED = 1;
public static final short WITH_GRANT_OPTION = 2;
public static final short WITH_HIERARCHY = 4;
public class ObjectPermission implements DBAPrivilegeGrant {
@NotNull
private PostgrePrivilegeType privilegeType;
@NotNull
private String grantor;
private short permissions;
public ObjectPermission(@NotNull PostgrePrivilegeType privilegeType, @NotNull String grantor, short permissions) {
this.privilegeType = privilegeType;
this.grantor = grantor;
this.permissions = permissions;
}
@Override
public DBARole getSubject(DBRProgressMonitor monitor) throws DBException {
return owner instanceof DBARole ? (DBARole) owner : (DBARole) getTargetObject(monitor);
}
@Override
public DBSObject getObject(DBRProgressMonitor monitor) throws DBException {
return owner instanceof DBARole ? getTargetObject(monitor) : owner;
}
@Override
public DBAPrivilege[] getPrivileges() {
return new DBAPrivilege[] { PostgrePrivilege.this };
}
@NotNull
public PostgrePrivilegeType getPrivilegeType() {
return privilegeType;
}
@Override
public boolean isGranted() {
return (permissions & GRANTED) == GRANTED;
}
@NotNull
public String getGrantor() {
return grantor;
}
public short getPermissions() {
return permissions;
}
@Override
public String toString() {
return privilegeType.toString();
}
}
protected final PostgrePrivilegeOwner owner;
private ObjectPermission[] permissions;
public PostgrePrivilege(PostgrePrivilegeOwner owner, List<PostgrePrivilegeGrant> grants) {
this.owner = owner;
this.permissions = new ObjectPermission[grants.size()];
for (int i = 0 ; i < grants.size(); i++) {
final PostgrePrivilegeGrant privilege = grants.get(i);
short permission = GRANTED;
if (privilege.isGrantable()) permission |= WITH_GRANT_OPTION;
if (privilege.isWithHierarchy()) permission |= WITH_HIERARCHY;
this.permissions[i] = new ObjectPermission(privilege.getPrivilegeType(), privilege.getGrantor(), permission);
}
}
public DBAPrivilegeGrant[] getGrants() {
return permissions;
}
@Override
public boolean isPersisted() {
return true;
}
@Nullable
@Override
public String getDescription() {
return null;
}
@Nullable
@Override
public PostgrePrivilegeOwner getParentObject() {
return owner;
}
@NotNull
@Override
public DBPDataSource getDataSource() {
return owner.getDataSource();
}
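    // Note on the change under review: it narrows the override above to return
    // PostgreDataSource (a covariant return type), so Postgres-specific callers can write
    // "PostgreDataSource ds = privilege.getDataSource();" without a cast. With the narrowed
    // signature, the DBPDataSource import at the top of this file becomes unused and, as the
    // review points out, should be removed.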
public PostgrePrivilegeOwner getOwner() {
return owner;
}
public abstract PostgreObject getTargetObject(DBRProgressMonitor monitor) throws DBException;
public ObjectPermission[] getPermissions() {
return permissions;
}
public PostgrePrivilegeType[] getPrivileges() {
PostgrePrivilegeType[] ppt = new PostgrePrivilegeType[permissions.length];
for (int i = 0; i < permissions.length; i++) {
ppt[i] = permissions[i].getPrivilegeType();
}
return ppt;
}
public short getPermission(PostgrePrivilegeType privilegeType) {
for (ObjectPermission permission : permissions) {
if (permission.privilegeType == privilegeType || permission.privilegeType == PostgrePrivilegeType.ALL) {
return permission.permissions;
}
}
return NONE;
}
public void setPermission(PostgrePrivilegeType privilegeType, boolean permit) {
for (ObjectPermission permission : permissions) {
if (permission.privilegeType == privilegeType) {
if (permit) {
permission.permissions |= GRANTED;
} else {
permission.permissions = 0;
}
}
}
}
// Properties for permissions viewer
/*
@Property(viewable = true, editable = true, updatable = true, order = 100, name = "SELECT")
public boolean hasPermissionSelect() {
return getPermission(PostgrePrivilegeType.SELECT) != 0;
}
@Property(viewable = true, order = 101, name = "INSERT")
public boolean hasPermissionInsert() {
return getPermission(PostgrePrivilegeType.INSERT) != 0;
}
@Property(viewable = true, order = 102, name = "UPDATE")
public boolean hasPermissionUpdate() {
return getPermission(PostgrePrivilegeType.UPDATE) != 0;
}
@Property(viewable = true, order = 103, name = "DELETE")
public boolean hasPermissionDelete() {
return getPermission(PostgrePrivilegeType.DELETE) != 0;
}
@Property(viewable = true, order = 104, name = "TRUNCATE")
public boolean hasPermissionTruncate() {
return getPermission(PostgrePrivilegeType.TRUNCATE) != 0;
}
@Property(viewable = true, order = 105, name = "REFERENCES")
public boolean hasPermissionReferences() {
return getPermission(PostgrePrivilegeType.REFERENCES) != 0;
}
@Property(viewable = true, order = 106, name = "TRIGGER")
public boolean hasPermissionTrigger() {
return getPermission(PostgrePrivilegeType.TRIGGER) != 0;
}
*/
/**
* Checks all privileges
*/
public boolean hasAllPrivileges(Object object) {
for (PostgrePrivilegeType pt : PostgrePrivilegeType.values()) {
if (pt.isValid() && pt.supportsType(object.getClass()) && getPermission(pt) == 0) {
return false;
}
}
return true;
}
}
| 1 | 11,078 | Please remove the unused import of DBPDataSource. | dbeaver-dbeaver | java |
@@ -43,7 +43,7 @@ namespace Microsoft.DotNet.Build.Tasks.Feed
public void LogError(string data)
{
- _log.LogError(data);
+ _log.LogWarning(data);
}
public void LogInformation(string data) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using NuGet.Common;
using System.Threading.Tasks;
using MSBuild = Microsoft.Build.Utilities;
namespace Microsoft.DotNet.Build.Tasks.Feed
{
public class SleetLogger : ILogger
{
private MSBuild.TaskLoggingHelper _log;
public SleetLogger(MSBuild.TaskLoggingHelper log)
{
_log = log;
}
public void Log(LogLevel level, string data)
{
_log.LogMessage(data, level);
}
public void Log(ILogMessage message)
{
_log.LogMessage(message.Message, message.Level);
}
public Task LogAsync(LogLevel level, string data)
{
return Task.Run(() => _log.LogMessage(data, level));
}
public Task LogAsync(ILogMessage message)
{
return Task.Run(() => _log.LogMessage(message.Message, message.Level));
}
public void LogDebug(string data)
{
_log.LogMessage(data, LogLevel.Debug);
}
public void LogError(string data)
{
_log.LogError(data);
}
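        // Note on the change under review: it swaps the _log.LogError call above for
        // _log.LogWarning, so sleet errors no longer fail the build. Per the review, keeping a
        // hint that the message was originally an error helps diagnosability; one possible
        // (hypothetical) variant:
        //
        //     public void LogError(string data)
        //     {
        //         _log.LogWarning($"Sleet error (reported as warning): {data}");
        //     }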
public void LogInformation(string data)
{
_log.LogMessage(data, LogLevel.Information);
}
public void LogInformationSummary(string data)
{
_log.LogMessage(data, LogLevel.Information);
}
public void LogMinimal(string data)
{
_log.LogMessage(data, LogLevel.Minimal);
}
public void LogVerbose(string data)
{
_log.LogMessage(data, LogLevel.Verbose);
}
public void LogWarning(string data)
{
_log.LogWarning(data);
}
}
}
| 1 | 14,024 | I think you should add some extra message here saying it was originally sent as an error, for diagnosability. | dotnet-buildtools | .cs |
@@ -144,14 +144,14 @@ void test3() {
ExplicitBitVect bv(2048);
AvalonTools::getAvalonFP("c1cocc1", true, bv, 2048, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1cocc1 " << bv.getNumOnBits() << std::endl;
- TEST_ASSERT(bv.getNumOnBits() == 53);
+ TEST_ASSERT(bv.getNumOnBits() == 48);
}
{
ExplicitBitVect bv(2048);
AvalonTools::getAvalonFP("C1=COC=C1", true, bv, 2048, false, true,
0x006FFF);
BOOST_LOG(rdInfoLog) << "C1=COC=C1 " << bv.getNumOnBits() << std::endl;
- TEST_ASSERT(bv.getNumOnBits() == 53);
+ TEST_ASSERT(bv.getNumOnBits() == 48);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl; | 1 | // $Id$
//
// Created by Greg Landrum, July 2008
//
//
// Expected test results here correspond to v1.0 of the open-source
// avalontoolkit
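// Note: the fingerprint on-bit counts asserted in test3() below depend on the avalontoolkit
// version in use. The change under review updates two of the furan expectations from 53 to
// 48 bits, and the accompanying review cautions that those changes may not be valid for
// v1.2 of the Avalon toolkit.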
//
#include <RDGeneral/RDLog.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/FileParsers/FileParsers.h>
#include <RDGeneral/Invariant.h>
#include <DataStructs/ExplicitBitVect.h>
#include <iostream>
#include <fstream>
#include <cstdio>
#include "AvalonTools.h"
#include <string>
using namespace RDKit;
void test1() {
BOOST_LOG(rdInfoLog) << "testing canonical smiles generation" << std::endl;
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1ccccc1"));
TEST_ASSERT(m);
std::string smi = AvalonTools::getCanonSmiles(*m);
TEST_ASSERT(smi == "c1ccccc1");
delete m;
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1cccnc1"));
TEST_ASSERT(m);
std::string smi = AvalonTools::getCanonSmiles(*m);
TEST_ASSERT(smi == "c1ccncc1");
delete m;
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("n1ccccc1"));
TEST_ASSERT(m);
std::string smi = AvalonTools::getCanonSmiles(*m);
TEST_ASSERT(smi == "c1ccncc1");
delete m;
}
{
std::string smi = AvalonTools::getCanonSmiles("n1ccccc1", true);
TEST_ASSERT(smi == "c1ccncc1");
}
{
std::string smi = AvalonTools::getCanonSmiles("c1cccnc1", true);
TEST_ASSERT(smi == "c1ccncc1");
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test2() {
BOOST_LOG(rdInfoLog) << "testing coordinate generation" << std::endl;
#if 1
{
RWMol *m = SmilesToMol("c1cccnc1");
TEST_ASSERT(m);
unsigned int confId = AvalonTools::set2DCoords(*m);
TEST_ASSERT(m->getNumConformers() == 1);
TEST_ASSERT(confId == 0);
delete m;
}
#endif
{
std::string molb = AvalonTools::set2DCoords("c1cccnc1", true);
TEST_ASSERT(molb != "");
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test3() {
BOOST_LOG(rdInfoLog) << "testing fingerprint generation" << std::endl;
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1ccccn1"));
TEST_ASSERT(m);
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP(*m, bv, 512, false, true, 0x00006FFF);
BOOST_LOG(rdInfoLog) << "c1ccccn1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 18);
delete m;
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1ccccc1"));
TEST_ASSERT(m);
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP(*m, bv, 512, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1ccccn1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 6);
delete m;
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1nnccc1"));
TEST_ASSERT(m);
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP(*m, bv, 512, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1nnccc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 28);
delete m;
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1ncncc1"));
TEST_ASSERT(m);
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP(*m, bv, 512, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1ncncc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 25);
delete m;
}
{
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP("c1cccnc1", true, bv, 512, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1cccnc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 18);
}
{
ExplicitBitVect bv(512);
AvalonTools::getAvalonFP("c1ccccc1", true, bv, 512, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1ccccc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 6);
}
{
ROMol *m = static_cast<ROMol *>(SmilesToMol("c1cccnc1"));
TEST_ASSERT(m);
ExplicitBitVect bv(1024);
AvalonTools::getAvalonFP(*m, bv, 1024, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1cccnc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 19);
delete m;
}
{
ExplicitBitVect bv(2048);
AvalonTools::getAvalonFP("c1cocc1", true, bv, 2048, false, true, 0x006FFF);
BOOST_LOG(rdInfoLog) << "c1cocc1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 53);
}
{
ExplicitBitVect bv(2048);
AvalonTools::getAvalonFP("C1=COC=C1", true, bv, 2048, false, true,
0x006FFF);
BOOST_LOG(rdInfoLog) << "C1=COC=C1 " << bv.getNumOnBits() << std::endl;
TEST_ASSERT(bv.getNumOnBits() == 53);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testRDK151() {
BOOST_LOG(rdInfoLog) << "testing Jira issue RDK-151: pyAvalonTools not "
"generating chiral smiles from molecules"
<< std::endl;
{
std::string tSmi = "C[C@H](F)Cl";
ROMol *m = static_cast<ROMol *>(SmilesToMol(tSmi));
TEST_ASSERT(m);
std::string smi = AvalonTools::getCanonSmiles(tSmi, true);
CHECK_INVARIANT(smi == tSmi, smi + "!=" + tSmi);
smi = AvalonTools::getCanonSmiles(*m);
CHECK_INVARIANT(smi == tSmi, smi + "!=" + tSmi);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testSmilesFailures() {
BOOST_LOG(rdInfoLog) << "testing handling of bad smiles strings" << std::endl;
{
std::string tSmi = "C1C";
std::string smi = AvalonTools::getCanonSmiles(tSmi, true);
CHECK_INVARIANT(smi == "", smi);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testSubstructFps() {
BOOST_LOG(rdInfoLog) << "testing substructure fingerprints " << std::endl;
{
ExplicitBitVect bv1(512), bv2(512);
AvalonTools::getAvalonFP("c1ccccc1", true, bv1, 512, true, true,
AvalonTools::avalonSSSBits);
AvalonTools::getAvalonFP("c1ccccc1C(F)(F)F", true, bv2, 512);
TEST_ASSERT((bv1 & bv2) == bv1);
AvalonTools::getAvalonFP("c1ccccc1C(F)(F)F", true, bv1, 512);
TEST_ASSERT((bv1 & bv2) == bv1);
AvalonTools::getAvalonFP("c1cccc(C)c1C(F)(F)F", true, bv2, 512);
TEST_ASSERT((bv1 & bv2) == bv1);
}
{
ExplicitBitVect bv1(512), bv2(512);
AvalonTools::getAvalonFP("c1ccccc1O", true, bv1, 512, true, true,
AvalonTools::avalonSSSBits);
AvalonTools::getAvalonFP("c1ccccc1OC", true, bv2, 512);
TEST_ASSERT((bv1 & bv2) == bv1);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testStruChk() {
BOOST_LOG(rdInfoLog) << "testing structure checking " << std::endl;
{
int errs = 0;
RDKit::ROMOL_SPTR m = AvalonTools::checkMol(errs, "c1ccccc1", true);
TEST_ASSERT(errs == 0);
m = AvalonTools::checkMol(errs, "c1c(R)cccc1C1(CC-C(C)C1)C", true);
TEST_ASSERT(errs != 0);
}
{
int errs = 0;
std::string res;
boost::tie(res, errs) = AvalonTools::checkMolString("c1ccccc1", true);
TEST_ASSERT(errs == 0);
TEST_ASSERT(res != "");
boost::tie(res, errs) =
AvalonTools::checkMolString("c1c(R)cccc1C1(CC-C(C)C1)C", true);
TEST_ASSERT(errs == 1);
TEST_ASSERT(res == "");
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testBadMolfile() {
BOOST_LOG(rdInfoLog) << "testing handling bad molecules " << std::endl;
// some tests around dealing with bad mol blocks
{
std::string molb =
"SNAP007157A\n\
MACCS-II3194121345\n\
\n\
0 0 0 0 0";
std::string smi = AvalonTools::getCanonSmiles(molb, false);
CHECK_INVARIANT(smi == "", smi);
ExplicitBitVect bv(1024);
AvalonTools::getAvalonFP(molb, false, bv, 1024);
TEST_ASSERT(bv.getNumOnBits() == 0);
std::string oMolb;
AvalonTools::set2DCoords(molb, false);
CHECK_INVARIANT(oMolb == "", oMolb);
}
}
void testSmilesSegFault() {
BOOST_LOG(rdInfoLog)
<< "testing a canonical smiles case that led to seg faults " << std::endl;
// some tests around dealing with bad mol blocks
{
std::string inSmi(1024, 'C');
std::string smi = AvalonTools::getCanonSmiles(inSmi, true);
TEST_ASSERT(smi == inSmi);
}
{
std::string inSmi(1534, 'C');
std::string smi = AvalonTools::getCanonSmiles(inSmi, true);
TEST_ASSERT(smi == inSmi);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub336() {
BOOST_LOG(rdInfoLog) << "testing github issue 336: bad canonical smiles for "
"conjugated double bonds" << std::endl;
// some tests around dealing with bad mol blocks
{
std::string pathName = getenv("RDBASE");
pathName += "/External/AvalonTools/test_data/";
std::ifstream ins((pathName + "EZ_test.2.sdf").c_str());
std::string mb((std::istreambuf_iterator<char>(ins)),
std::istreambuf_iterator<char>());
ROMol *m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 17);
std::string smi1 = AvalonTools::getCanonSmiles(mb, false);
std::string smi2 = AvalonTools::getCanonSmiles(*m);
std::cerr << "smi1: " << smi1 << std::endl;
std::cerr << "smi2: " << smi2 << std::endl;
TEST_ASSERT(smi1 == smi2);
delete m;
}
{
std::string pathName = getenv("RDBASE");
pathName += "/External/AvalonTools/test_data/";
std::ifstream ins((pathName + "heterocycle.mol").c_str());
std::string mb((std::istreambuf_iterator<char>(ins)),
std::istreambuf_iterator<char>());
RWMol *m = MolBlockToMol(mb, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 6);
m->updatePropertyCache();
MolOps::cleanUp(*m);
MolOps::setAromaticity(*m);
std::string smi1 = AvalonTools::getCanonSmiles(mb, false);
std::string smi2 = AvalonTools::getCanonSmiles(*m);
std::cerr << "smi1: " << smi1 << std::endl;
std::cerr << "smi2: " << smi2 << std::endl;
TEST_ASSERT(smi1 == smi2);
TEST_ASSERT(smi1 == "CC1C=NNC=1");
delete m;
}
{
std::string pathName = getenv("RDBASE");
pathName += "/External/AvalonTools/test_data/";
std::ifstream ins((pathName + "heterocycle2.mol").c_str());
std::string mb((std::istreambuf_iterator<char>(ins)),
std::istreambuf_iterator<char>());
RWMol *m = MolBlockToMol(mb, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 11);
m->updatePropertyCache();
MolOps::cleanUp(*m);
MolOps::setAromaticity(*m);
std::string smi1 = AvalonTools::getCanonSmiles(mb, false);
std::string smi2 = AvalonTools::getCanonSmiles(*m);
std::cerr << "smi1: " << smi1 << std::endl;
std::cerr << "smi2: " << smi2 << std::endl;
TEST_ASSERT(smi1 == smi2);
TEST_ASSERT(smi1 == "CN2C=CC1=CC(=O)NC=C12");
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testCountFps() {
BOOST_LOG(rdInfoLog) << "testing substructure fingerprints " << std::endl;
{
SparseIntVect<boost::uint32_t> cv1(5000), cv2(5000);
AvalonTools::getAvalonCountFP("c1ccccc1", true, cv1, 5000);
AvalonTools::getAvalonCountFP("c1ccccc1.c1ccccc1", true, cv2, 5000);
for (unsigned int i = 0; i < cv1.size(); ++i) {
if (cv1[i] && (cv2[i] != 2 * cv1[i])) {
std::cerr << " mismatch: " << i << " " << cv1[i] << " " << cv2[i]
<< std::endl;
}
}
for (unsigned int i = 0; i < cv1.size(); ++i) {
TEST_ASSERT(!cv1[i] || (cv2[i] == 2 * cv1[i]));
}
}
{
ROMol *m1 = static_cast<ROMol *>(SmilesToMol("c1ccccc1"));
TEST_ASSERT(m1);
ROMol *m2 = static_cast<ROMol *>(SmilesToMol("c1ccccc1.c1ccccc1"));
TEST_ASSERT(m2);
SparseIntVect<boost::uint32_t> cv1(5000), cv2(5000);
AvalonTools::getAvalonCountFP(*m1, cv1, 5000);
AvalonTools::getAvalonCountFP(*m2, cv2, 5000);
for (unsigned int i = 0; i < cv1.size(); ++i) {
if (cv1[i] && (cv2[i] != 2 * cv1[i])) {
std::cerr << " mismatch: " << i << " " << cv1[i] << " " << cv2[i]
<< std::endl;
}
}
for (unsigned int i = 0; i < cv1.size(); ++i) {
TEST_ASSERT(!cv1[i] || (cv2[i] == 2 * cv1[i]));
}
delete m1;
delete m2;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testInitStruChk() {
BOOST_LOG(rdInfoLog) << "testing init struchk " << std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Data/struchk/";
std::string struchk_init =
"-tm\n"
"-ta " +
pathName + std::string("checkfgs.trn\n") +
"-tm\n"
"-or\n"
"-ca " +
pathName + std::string("checkfgs.chk\n") +
"-cc\n"
"-cl 3\n"
"-cs\n"
"-cn 999\n"
"-l " +
std::string(std::tmpnam(NULL)) + std::string("\n");
int errs = AvalonTools::initCheckMol(struchk_init);
TEST_ASSERT(!errs);
RDKit::ROMOL_SPTR m = AvalonTools::checkMol(errs, "c1ccccc1", true);
TEST_ASSERT(errs == 0);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
int main(int argc, char *argv[]) {
RDLog::InitLogs();
#if 1
test1();
test2();
test3();
testRDK151();
testSmilesFailures();
testSubstructFps();
testStruChk();
testBadMolfile();
testSmilesSegFault();
testGithub336();
testCountFps();
#endif
testInitStruChk();
return 0;
}
| 1 | 14,514 | I believe that all the changes in this file are not valid for v1.2 of the Avalon toolkit. | rdkit-rdkit | cpp |
@@ -509,6 +509,7 @@ public class DBService {
} else {
// carrying over auditEnabled from original role
role.setAuditEnabled(originalRole.getAuditEnabled());
+ mergeOriginalRoleAndMetaRoleAttributes(originalRole, role);
requestSuccess = con.updateRole(domainName, role);
}
| 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.yahoo.athenz.auth.AuthorityConsts;
import com.yahoo.athenz.auth.Principal;
import com.yahoo.athenz.auth.util.AthenzUtils;
import com.yahoo.athenz.common.server.audit.AuditReferenceValidator;
import com.yahoo.athenz.common.server.log.AuditLogMsgBuilder;
import com.yahoo.athenz.common.server.log.AuditLogger;
import com.yahoo.athenz.common.server.util.StringUtils;
import com.yahoo.athenz.zms.store.AthenzDomain;
import com.yahoo.athenz.zms.store.ObjectStore;
import com.yahoo.athenz.zms.store.ObjectStoreConnection;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.rdl.JSON;
import com.yahoo.rdl.Timestamp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
public class DBService {
ObjectStore store;
BitSet auditRefSet;
AuditLogger auditLogger;
Cache<String, DataCache> cacheStore;
QuotaChecker quotaCheck;
int retrySleepTime;
int defaultRetryCount;
int defaultOpTimeout;
ZMSConfig zmsConfig;
private static final Logger LOG = LoggerFactory.getLogger(DBService.class);
public static int AUDIT_TYPE_ROLE = 0;
public static int AUDIT_TYPE_POLICY = 1;
public static int AUDIT_TYPE_SERVICE = 2;
public static int AUDIT_TYPE_DOMAIN = 3;
public static int AUDIT_TYPE_ENTITY = 4;
public static int AUDIT_TYPE_TENANCY = 5;
public static int AUDIT_TYPE_TEMPLATE = 6;
private static final String ROLE_PREFIX = "role.";
private static final String POLICY_PREFIX = "policy.";
private static final String TEMPLATE_DOMAIN_NAME = "_domain_";
private static final String AUDIT_REF = "Athenz User Authority Filter Enforcer";
AuditReferenceValidator auditReferenceValidator;
private ScheduledExecutorService userAuthorityFilterExecutor;
public DBService(ObjectStore store, AuditLogger auditLogger, ZMSConfig zmsConfig, AuditReferenceValidator auditReferenceValidator) {
this.store = store;
this.zmsConfig = zmsConfig;
this.auditLogger = auditLogger;
cacheStore = CacheBuilder.newBuilder().concurrencyLevel(25).build();
// default timeout in seconds for object store commands
defaultOpTimeout = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_STORE_OP_TIMEOUT, "60"));
if (defaultOpTimeout < 0) {
defaultOpTimeout = 60;
}
if (this.store != null) {
this.store.setOperationTimeout(defaultOpTimeout);
}
        // retrieve the concurrent update retry count. If we're given an invalid negative
        // value for the count, we'll fall back to our default configured value of 120 retries,
        // which would result in up to 30 seconds of retrying, sleeping 250ms each time
defaultRetryCount = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_COUNT, "120"));
if (defaultRetryCount < 0) {
defaultRetryCount = 120;
}
retrySleepTime = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_SLEEP_TIME, "250"));
if (retrySleepTime < 0) {
retrySleepTime = 250;
}
// check what objects we're going to enforce audit reference flag
setAuditRefObjectBits();
this.auditReferenceValidator = auditReferenceValidator;
// create our quota checker class
quotaCheck = new QuotaChecker();
// start our thread to process user authority changes daily
userAuthorityFilterExecutor = Executors.newScheduledThreadPool(1);
userAuthorityFilterExecutor.scheduleAtFixedRate(
new UserAuthorityFilterEnforcer(), 0, 1, TimeUnit.DAYS);
}
void setAuditRefObjectBits() {
auditRefSet = new BitSet();
// by default we're only going to handle audit enabled roles
// the value is a comma separated list of supported objects:
// role, policy, service, domain, entity
final String auditCheck = System.getProperty(ZMSConsts.ZMS_PROP_AUDIT_REF_CHECK_OBJECTS, "role");
String[] objects = auditCheck.split(",");
for (String object : objects) {
switch (object) {
case ZMSConsts.ZMS_AUDIT_TYPE_ROLE:
auditRefSet.set(AUDIT_TYPE_ROLE);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_POLICY:
auditRefSet.set(AUDIT_TYPE_POLICY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_SERVICE:
auditRefSet.set(AUDIT_TYPE_SERVICE);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_DOMAIN:
auditRefSet.set(AUDIT_TYPE_DOMAIN);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_ENTITY:
auditRefSet.set(AUDIT_TYPE_ENTITY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_TENANCY:
auditRefSet.set(AUDIT_TYPE_TENANCY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_TEMPLATE:
auditRefSet.set(AUDIT_TYPE_TEMPLATE);
break;
}
}
}
public DomainRoleMembers listOverdueReviewRoleMembers(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listOverdueReviewRoleMembers(domainName);
}
}
static class DataCache {
AthenzDomain athenzDomain;
long modTime;
DataCache(AthenzDomain athenzDomain, long modTime) {
this.athenzDomain = athenzDomain;
this.modTime = modTime;
}
AthenzDomain getAthenzDomain() {
return athenzDomain;
}
long getModTime() {
return modTime;
}
}
AthenzDomain getAthenzDomainFromCache(ObjectStoreConnection con, String domainName) {
DataCache data = cacheStore.getIfPresent(domainName);
if (data == null) {
return null;
}
// if we have a match for a given domain name then we're going
        // to check if the last modified domain timestamp matches what's
        // in the db. If there is no match, then we'll take the hit
        // of an extra db read; however, in most cases the domain data is not
        // changed that often, so we'll satisfy the request by just
        // verifying the last modification time as opposed to reading the
// full domain data from db
long modTime = 0;
try {
modTime = con.getDomainModTimestamp(domainName);
} catch (ResourceException ignored) {
// if the exception is due to timeout or we were not able
// to get a connection to the object store then we're
// going to use our cache as is instead of rejecting
// the operation
}
        // if our cache data is the same as or newer than the db then return
        // data from the cache (it could be newer if we just updated
        // the cache based on the write db but, during the read, the server
        // hasn't replicated the data yet)
if (data.getModTime() >= modTime) {
return data.getAthenzDomain();
}
cacheStore.invalidate(domainName);
return null;
}
String getPrincipalName(ResourceContext ctx) {
if (ctx == null) {
return null;
}
Principal principal = ((RsrcCtxWrapper) ctx).principal();
if (principal == null) {
return null;
}
return principal.getFullName();
}
void saveChanges(ObjectStoreConnection con, String domainName) {
// we're first going to commit our changes which will
// also set the connection in auto-commit mode. we are
// going to change the domain timestamp in auto-commit
// mode so that we don't have a contention
con.commitChanges();
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
void auditLogRequest(ResourceContext ctx, String domainName, String auditRef,
String caller, String operation, String entityName, String auditDetails) {
auditLogger.log(getAuditLogMsgBuilder(ctx, domainName, auditRef, caller, operation, entityName, auditDetails));
}
void auditLogRequest(String principal, String domainName, String auditRef,
String caller, String operation, String entityName, String auditDetails) {
AuditLogMsgBuilder msgBldr = getAuditLogMsgBuilder(null, domainName, auditRef, caller, operation, entityName, auditDetails);
msgBldr.who(principal);
auditLogger.log(msgBldr);
}
private AuditLogMsgBuilder getAuditLogMsgBuilder(ResourceContext ctx, String domainName,
String auditRef, String caller, String operation, String entityName, String auditDetails) {
AuditLogMsgBuilder msgBldr = ZMSUtils.getAuditLogMsgBuilder(ctx, auditLogger,
domainName, auditRef, caller, operation);
msgBldr.when(Timestamp.fromCurrentTime().toString()).whatEntity(entityName);
if (auditDetails != null) {
msgBldr.whatDetails(auditDetails);
}
return msgBldr;
}
Domain makeDomain(ResourceContext ctx, Domain domain, List<String> adminUsers,
List<String> solutionTemplates, String auditRef) {
final String caller = "makedomain";
final String domainName = domain.getName();
String principalName = getPrincipalName(ctx);
if (principalName == null) {
principalName = "system-account";
}
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
// get our connection object
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// before adding this domain we need to verify our
// quota check for sub-domains
quotaCheck.checkSubdomainQuota(con, domainName, caller);
boolean objectsInserted = con.insertDomain(domain);
if (!objectsInserted) {
con.rollbackChanges();
throw ZMSUtils.requestError("makeDomain: Cannot create domain: " +
domainName + " - already exists", caller);
}
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"domain\": ");
auditLogDomain(auditDetails, domain);
// first create and process the admin role
Role adminRole = ZMSUtils.makeAdminRole(domainName, adminUsers);
auditDetails.append(", \"role\": ");
if (!processRole(con, null, domainName, ZMSConsts.ADMIN_ROLE_NAME, adminRole,
principalName, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot process role: '" +
adminRole.getName(), caller);
}
// now create and process the admin policy
Policy adminPolicy = ZMSUtils.makeAdminPolicy(domainName, adminRole);
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, null, domainName, ZMSConsts.ADMIN_POLICY_NAME, adminPolicy,
false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot process policy: '" +
adminPolicy.getName(), caller);
}
// go through our list of templates and add the specified
// roles and polices to our domain
if (solutionTemplates != null) {
for (String templateName : solutionTemplates) {
auditDetails.append(", \"template\": ");
if (!addSolutionTemplate(con, domainName, templateName, principalName,
null, auditRef, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot apply templates: '" +
domain, caller);
}
}
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log entry
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_POST,
domainName, auditDetails.toString());
return domain;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
boolean processPolicy(ObjectStoreConnection con, Policy originalPolicy, String domainName,
String policyName, Policy policy, boolean ignoreDeletes, StringBuilder auditDetails) {
// check to see if we need to insert the policy or update it
boolean requestSuccess;
if (originalPolicy == null) {
requestSuccess = con.insertPolicy(domainName, policy);
} else {
requestSuccess = con.updatePolicy(domainName, policy);
}
// if we didn't update any policies then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record
auditDetails.append("{\"name\": \"").append(policyName).append('\"');
        // now we need to process our policy assertions depending on whether this is
        // a new insert operation or an update
List<Assertion> newAssertions = policy.getAssertions();
if (originalPolicy == null) {
// we're just going to process our new assertions
if (newAssertions != null) {
for (Assertion assertion : newAssertions) {
if (!con.insertAssertion(domainName, policyName, assertion)) {
return false;
}
}
auditLogAssertions(auditDetails, "added-assertions", newAssertions);
}
} else {
// first we need to retrieve the current set of assertions
List<Assertion> curAssertions = originalPolicy.getAssertions();
if (curAssertions == null) {
curAssertions = new ArrayList<>();
}
List<Assertion> addAssertions = new ArrayList<>();
List<Assertion> delAssertions = new ArrayList<>();
policyAssertionChanges(newAssertions, curAssertions, addAssertions, delAssertions);
if (!ignoreDeletes) {
for (Assertion assertion : delAssertions) {
if (!con.deleteAssertion(domainName, policyName, assertion.getId())) {
return false;
}
}
auditLogAssertions(auditDetails, "deleted-assertions", delAssertions);
}
for (Assertion assertion : addAssertions) {
if (!con.insertAssertion(domainName, policyName, assertion)) {
return false;
}
}
auditLogAssertions(auditDetails, "added-assertions", addAssertions);
}
auditDetails.append('}');
return true;
}
boolean removeMatchedAssertion(Assertion assertion, List<Assertion> assertions, List<Assertion> matchedAssertions) {
AssertionEffect effect = AssertionEffect.ALLOW;
if (assertion.getEffect() != null) {
effect = assertion.getEffect();
}
Iterator<Assertion> itr = assertions.iterator();
while (itr.hasNext()) {
Assertion checkAssertion = itr.next();
if (!assertion.getAction().equals(checkAssertion.getAction())) {
continue;
}
if (!assertion.getResource().equals(checkAssertion.getResource())) {
continue;
}
if (!assertion.getRole().equals(checkAssertion.getRole())) {
continue;
}
AssertionEffect checkEffect = AssertionEffect.ALLOW;
if (checkAssertion.getEffect() != null) {
checkEffect = checkAssertion.getEffect();
}
if (effect != checkEffect) {
continue;
}
itr.remove();
matchedAssertions.add(checkAssertion);
return true;
}
return false;
}
void policyAssertionChanges(List<Assertion> newAssertions, List<Assertion> curAssertions,
List<Assertion> addAssertions, List<Assertion> delAssertions) {
// let's iterate through the new list and the ones that are
// not in the current list should be added to the add list
List<Assertion> matchedAssertions = new ArrayList<>();
if (newAssertions != null) {
for (Assertion assertion : newAssertions) {
if (!removeMatchedAssertion(assertion, curAssertions, matchedAssertions)) {
addAssertions.add(assertion);
}
}
}
        // now our current list has been updated as well and
        // all the assertions that were present have moved to the
        // matched assertion list, so whatever is left in the
        // current list must be deleted
delAssertions.addAll(curAssertions);
// now let's go back and re-add the matched assertions
// back to our list so we can get the right audit data
curAssertions.addAll(matchedAssertions);
}
boolean processRole(ObjectStoreConnection con, Role originalRole, String domainName,
String roleName, Role role, String admin, String auditRef, boolean ignoreDeletes,
StringBuilder auditDetails) {
// check to see if we need to insert the role or update it
boolean requestSuccess;
if (originalRole == null) {
// auditEnabled can only be set with system admin privileges
role.setAuditEnabled(false);
requestSuccess = con.insertRole(domainName, role);
} else {
// carrying over auditEnabled from original role
role.setAuditEnabled(originalRole.getAuditEnabled());
requestSuccess = con.updateRole(domainName, role);
}
// if we didn't update any roles then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record and log our trust field if one is available
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"trust\": \"").append(role.getTrust()).append('\"');
        // now we need to process our role members depending on whether this is
        // a new insert operation or an update
List<RoleMember> roleMembers = role.getRoleMembers();
// support older clients which might send members field
// at this point, we expect either roleMembers or members,
// and we can't have both
List<String> members = role.getMembers();
if (members != null && !members.isEmpty()) {
roleMembers = ZMSUtils.convertMembersToRoleMembers(members);
}
if (originalRole == null) {
// we are just going to process all members as new inserts
if (roleMembers != null) {
for (RoleMember member : roleMembers) {
if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "added-members", roleMembers);
}
} else {
processUpdateRoleMembers(con, originalRole, roleMembers, ignoreDeletes,
domainName, roleName, admin, auditRef, auditDetails);
}
auditDetails.append('}');
return true;
}
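    // Illustrative sketch only: the change under review calls a helper named
    // mergeOriginalRoleAndMetaRoleAttributes(originalRole, role) right before con.updateRole().
    // Its actual body is not shown here; a minimal version that carries over meta attributes
    // left unset on the incoming role object could look like the following (the exact field
    // list is an assumption made for illustration):
    void mergeOriginalRoleAndMetaRoleAttributesSketch(Role originalRole, Role role) {
        if (role.getSelfServe() == null) {
            role.setSelfServe(originalRole.getSelfServe());
        }
        if (role.getMemberExpiryDays() == null) {
            role.setMemberExpiryDays(originalRole.getMemberExpiryDays());
        }
        if (role.getTokenExpiryMins() == null) {
            role.setTokenExpiryMins(originalRole.getTokenExpiryMins());
        }
        if (role.getCertExpiryMins() == null) {
            role.setCertExpiryMins(originalRole.getCertExpiryMins());
        }
        if (role.getSignAlgorithm() == null) {
            role.setSignAlgorithm(originalRole.getSignAlgorithm());
        }
    }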
private boolean processUpdateRoleMembers(ObjectStoreConnection con, Role originalRole,
List<RoleMember> roleMembers, boolean ignoreDeletes, String domainName,
String roleName, String admin, String auditRef, StringBuilder auditDetails) {
// first we need to retrieve the current set of members
List<RoleMember> originalMembers = originalRole.getRoleMembers();
List<RoleMember> curMembers = (null == originalMembers) ? new ArrayList<>() : new ArrayList<>(originalMembers);
List<RoleMember> delMembers = new ArrayList<>(curMembers);
ArrayList<RoleMember> newMembers = (null == roleMembers) ? new ArrayList<>() : new ArrayList<>(roleMembers);
// remove current members from new members
ZMSUtils.removeMembers(newMembers, curMembers);
// remove new members from current members
// which leaves the deleted members.
ZMSUtils.removeMembers(delMembers, roleMembers);
if (!ignoreDeletes) {
for (RoleMember member : delMembers) {
if (!con.deleteRoleMember(domainName, roleName, member.getMemberName(), admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "deleted-members", delMembers);
}
for (RoleMember member : newMembers) {
if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "added-members", newMembers);
return true;
}
boolean processServiceIdentity(ObjectStoreConnection con, ServiceIdentity originalService,
String domainName, String serviceName, ServiceIdentity service,
boolean ignoreDeletes, StringBuilder auditDetails) {
boolean requestSuccess;
if (originalService == null) {
// provider endpoint can only be set with system admin privileges
service.setProviderEndpoint(null);
requestSuccess = con.insertServiceIdentity(domainName, service);
} else {
// carrying over provider endpoint from original service
service.setProviderEndpoint(originalService.getProviderEndpoint());
requestSuccess = con.updateServiceIdentity(domainName, service);
}
// if we didn't update any services then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record and log our service details
auditDetails.append("{\"name\": \"").append(serviceName).append('\"')
.append(", \"executable\": \"").append(service.getExecutable()).append('\"')
.append(", \"user\": \"").append(service.getUser()).append('\"')
.append(", \"group\": \"").append(service.getGroup()).append('\"')
.append(", \"description\": \"").append(service.getDescription()).append('\"');
        // now we need to process our public keys depending on whether this is
        // a new insert operation or an update
List<PublicKeyEntry> publicKeys = service.getPublicKeys();
if (originalService == null) {
// we are just going to process all public keys as new inserts
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
if (!con.insertPublicKeyEntry(domainName, serviceName, publicKey)) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "added-publickeys", publicKeys);
}
} else {
// first we need to retrieve the current set of public keys
List<PublicKeyEntry> curPublicKeys = originalService.getPublicKeys();
Map<String, PublicKeyEntry> curPublicKeysMap = new HashMap<>();
if (curPublicKeys != null) {
for (PublicKeyEntry publicKey : curPublicKeys) {
curPublicKeysMap.put(publicKey.getId(), publicKey);
}
}
Map<String, PublicKeyEntry> publicKeysMap = new HashMap<>();
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
publicKeysMap.put(publicKey.getId(), publicKey);
}
}
Set<String> curPublicKeysSet = new HashSet<>(curPublicKeysMap.keySet());
Set<String> delPublicKeysSet = new HashSet<>(curPublicKeysSet);
Set<String> newPublicKeysSet = new HashSet<>(publicKeysMap.keySet());
newPublicKeysSet.removeAll(curPublicKeysSet);
delPublicKeysSet.removeAll(new HashSet<>(publicKeysMap.keySet()));
if (!ignoreDeletes) {
for (String publicKey : delPublicKeysSet) {
if (!con.deletePublicKeyEntry(domainName, serviceName, publicKey)) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "deleted-publickeys", delPublicKeysSet);
}
for (String publicKey : newPublicKeysSet) {
if (!con.insertPublicKeyEntry(domainName, serviceName, publicKeysMap.get(publicKey))) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "added-publickeys", newPublicKeysSet, publicKeysMap);
}
// now we need to process the hosts defined for this service
Set<String> curHosts;
if (originalService != null && originalService.getHosts() != null) {
curHosts = new HashSet<>(originalService.getHosts());
} else {
curHosts = new HashSet<>();
}
Set<String> newHosts;
if (service.getHosts() != null) {
newHosts = new HashSet<>(service.getHosts());
} else {
newHosts = new HashSet<>();
}
Set<String> delHosts = new HashSet<>(curHosts);
delHosts.removeAll(newHosts);
newHosts.removeAll(curHosts);
for (String host : delHosts) {
if (!con.deleteServiceHost(domainName, serviceName, host)) {
return false;
}
}
auditLogStrings(auditDetails, "deleted-hosts", delHosts);
for (String host : newHosts) {
if (!con.insertServiceHost(domainName, serviceName, host)) {
return false;
}
}
auditLogStrings(auditDetails, "added-hosts", newHosts);
auditDetails.append('}');
return true;
}
boolean shouldRetryOperation(ResourceException ex, int retryCount) {
// before doing anything else let's check to see if
// we still have the option to retry the operation
if (retryCount <= 1) {
return false;
}
// if we got a conflict result it means we either had
// no connection or deadlock was detected and as such
// the changes were aborted
boolean retry = false;
switch (ex.getCode()) {
case ResourceException.CONFLICT:
retry = true;
break;
case ResourceException.GONE:
// this error indicates that the server is reporting it is in
// read-only mode which indicates a fail-over has taken place
// and we need to clear all connections and start new ones
// this could only happen with write operations against the
// read-write object store
store.clearConnections();
retry = true;
break;
}
// if we're asked to retry then we're going to
// wait for a short period of time to allow the other
// connection to finish its work
if (retry) {
if (LOG.isDebugEnabled()) {
LOG.debug(": possible deadlock, retries available: " + retryCount);
}
ZMSUtils.threadSleep(retrySleepTime);
}
// return our response
return retry;
}
void executePutPolicy(ResourceContext ctx, String domainName, String policyName, Policy policy,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// check that quota is not exceeded
quotaCheck.checkPolicyQuota(con, domainName, policy, caller);
// retrieve our original policy
Policy originalPolicy = getPolicy(con, domainName, policyName);
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processPolicy(con, originalPolicy, domainName, policyName, policy, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
policyName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutRole(ResourceContext ctx, String domainName, String roleName, Role role,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// check that quota is not exceeded
quotaCheck.checkRoleQuota(con, domainName, role, caller);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole != null &&
(originalRole.getAuditEnabled() == Boolean.TRUE || originalRole.getReviewEnabled() == Boolean.TRUE)) {
throw ZMSUtils.requestError("Can not update auditEnabled and/or reviewEnabled roles", caller);
}
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processRole(con, originalRole, domainName, roleName, role,
principal, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + role.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutServiceIdentity(ResourceContext ctx, String domainName, String serviceName,
ServiceIdentity service, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// check that quota is not exceeded
quotaCheck.checkServiceIdentityQuota(con, domainName, service, caller);
// retrieve our original service identity object
ServiceIdentity originalService = getServiceIdentity(con, domainName, serviceName, false);
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processServiceIdentity(con, originalService, domainName, serviceName,
service, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put service: " + service.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutPublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
PublicKeyEntry keyEntry, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// check to see if this key already exists or not
PublicKeyEntry originalKeyEntry = con.getPublicKeyEntry(domainName, serviceName,
keyEntry.getId(), false);
                // now we need to verify our quota check if we know
                // that we'll be adding another public key
if (originalKeyEntry == null) {
quotaCheck.checkServiceIdentityPublicKeyQuota(con, domainName, serviceName, caller);
}
// now process the request
boolean requestSuccess;
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (originalKeyEntry == null) {
requestSuccess = con.insertPublicKeyEntry(domainName, serviceName, keyEntry);
auditDetails.append("{\"added-publicKeys\": [");
} else {
requestSuccess = con.updatePublicKeyEntry(domainName, serviceName, keyEntry);
auditDetails.append("{\"updated-publicKeys\": [");
}
if (!requestSuccess) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put public key: " + keyEntry.getId() +
" in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller);
}
// update our service and domain time-stamp and save changes
con.updateServiceIdentityModTimestamp(domainName, serviceName);
saveChanges(con, domainName);
// audit log the request
auditLogPublicKeyEntry(auditDetails, keyEntry, true);
auditDetails.append("]}");
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
String keyId, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// now process the request
if (!con.deletePublicKeyEntry(domainName, serviceName, keyId)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError("unable to delete public key: " + keyId +
" in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller);
}
// update our service and domain time-stamp and save changes
con.updateServiceIdentityModTimestamp(domainName, serviceName);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"deleted-publicKeys\": [{\"id\": \"").append(keyId).append("\"}]}");
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
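    /**
     * Return true if the given role is a delegated (trust) role - i.e. its
     * membership is managed by another domain specified in the trust field.
     **/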
boolean isTrustRole(Role role) {
if (role == null) {
return false;
}
return role.getTrust() != null && !role.getTrust().isEmpty();
}
void executePutMembership(ResourceContext ctx, String domainName, String roleName,
RoleMember roleMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
                // make sure the role auditing requirements are met
Role originalRole = con.getRole(domainName, roleName);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, principal);
// before inserting a member we need to verify that
// this is a group role and not a delegated one.
if (isTrustRole(originalRole)) {
con.rollbackChanges();
                    throw ZMSUtils.requestError(caller + ": " + roleName +
                            " is a delegated role", caller);
}
                // now we need to verify our quota check
quotaCheck.checkRoleMembershipQuota(con, domainName, roleName, caller);
                // process our insert role member request. since this is a "single"
                // operation, we are not using any transactions.
if (!con.insertRoleMember(domainName, roleName, roleMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": unable to insert role member: " +
roleMember.getMemberName() + " to role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName,
auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutEntity(ResourceContext ctx, String domainName, String entityName,
Entity entity, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ENTITY);
// check that quota is not exceeded
quotaCheck.checkEntityQuota(con, domainName, entity, caller);
// check to see if this key already exists or not
Entity originalEntity = con.getEntity(domainName, entityName);
// now process the request
boolean requestSuccess;
if (originalEntity == null) {
requestSuccess = con.insertEntity(domainName, entity);
} else {
requestSuccess = con.updateEntity(domainName, entity);
}
if (!requestSuccess) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put entity: "
+ entity.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
entity.getName(), JSON.string(entity.getValue()));
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteMembership(ResourceContext ctx, String domainName, String roleName,
String normalizedMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
                // if this is the admin role then we need to make sure
                // we are not deleting the last member left in the role
if (ZMSConsts.ADMIN_ROLE_NAME.equals(roleName)) {
List<RoleMember> members = con.listRoleMembers(domainName, roleName, false);
if (members.size() == 1 && members.get(0).getMemberName().equals(normalizedMember)) {
throw ZMSUtils.forbiddenError(caller +
": Cannot delete last member of 'admin' role", caller);
}
}
// process our delete role member operation
if (!con.deleteRoleMember(domainName, roleName, normalizedMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role member: " +
normalizedMember + " from role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, "{\"member\": \"" + normalizedMember + "\"}");
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePendingMembership(ResourceContext ctx, String domainName, String roleName,
String normalizedMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// process our delete role member operation
if (!con.deletePendingRoleMember(domainName, roleName, normalizedMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete pending role member: " +
normalizedMember + " from role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, "{\"pending-member\": \"" + normalizedMember + "\"}");
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteServiceIdentity(ResourceContext ctx, String domainName, String serviceName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// process our delete service request
if (!con.deleteServiceIdentity(domainName, serviceName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete service: " + serviceName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
serviceName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteEntity(ResourceContext ctx, String domainName, String entityName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ENTITY);
// process our delete role request
if (!con.deleteEntity(domainName, entityName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete entity: " + entityName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
entityName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteRole(ResourceContext ctx, String domainName, String roleName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ROLE);
// process our delete role request
if (!con.deleteRole(domainName, roleName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role: " + roleName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePolicy(ResourceContext ctx, String domainName, String policyName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// process our delete policy request
if (!con.deletePolicy(domainName, policyName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete policy: " + policyName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
policyName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
/**
* If the domain has audit enabled, and user did not provide the auditRef,
* an exception will be thrown
**/
void checkDomainAuditEnabled(ObjectStoreConnection con, final String domainName,
final String auditRef, final String caller, final String principal, int objectType) {
// before retrieving the domain details make sure we are
// configured to enforce audit reference field on the given
// object type
if (!auditRefSet.get(objectType)) {
return;
}
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
auditReferenceCheck(con, domain, auditRef, caller, principal);
}
void checkDomainAuditEnabled(ObjectStoreConnection con, Domain domain,
final String auditRef, final String caller, final String principal, int objectType) {
if (!auditRefSet.get(objectType)) {
return;
}
auditReferenceCheck(con, domain, auditRef, caller, principal);
}
void auditReferenceCheck(ObjectStoreConnection con, Domain domain, final String auditRef,
final String caller, final String principal) {
if (domain.getAuditEnabled() == Boolean.TRUE) {
if (auditRef == null || auditRef.length() == 0) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference required for domain: " + domain.getName(), caller);
}
if (auditReferenceValidator != null && !auditReferenceValidator.validateReference(auditRef, principal, caller)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference validation failed for domain: " + domain.getName() + ", auditRef: " + auditRef, caller);
}
}
}
void executeDeleteDomain(ResourceContext ctx, String domainName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_DOMAIN);
// now process the request
con.deleteDomain(domainName);
con.commitChanges();
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
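    /**
     * Return the list of principals known to the store. If domainOnly is true,
     * the result is filtered down to principals with a single domain separator,
     * which skips services and sub-domain principals.
     **/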
List<String> listPrincipals(String domainName, boolean domainOnly) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
List<String> principals = con.listPrincipals(domainName);
// if no further filtering is necessary, return the data
// right away
if (!domainOnly) {
return principals;
}
// generate our return list
List<String> users = new ArrayList<>();
// if we're asked for domain only then we need to match
// the domain name, if specified, and make sure the response
// only includes a single period/domain separator
            // we need to skip an extra byte to accommodate the
            // domain separator (e.g. <domainName>.<userName>)
int prefixLength = 0;
if (domainName != null) {
prefixLength = domainName.length() + 1;
}
for (String principal : principals) {
// make sure the principal name doesn't have multiple
// components - e.g. user.joe.test since it represents
// a service or a sub-domain and we're only interested
// in actual users
if (prefixLength > 0) {
if (principal.substring(prefixLength).indexOf('.') == -1) {
users.add(principal);
}
} else {
// we have a single separator when the first index
// and the last index are the same
if (principal.indexOf('.') == principal.lastIndexOf('.')) {
users.add(principal);
}
}
}
return users;
}
}
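    /**
     * Remove the given principal from all roles in the specified domain.
     * Individual delete failures are logged and ignored so a single bad
     * entry does not fail the overall operation. Role and domain
     * modification timestamps are only updated if any roles were found.
     **/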
void removePrincipalFromDomainRoles(ObjectStoreConnection con, String domainName, String principalName,
String adminUser, String auditRef) {
        // extract all the roles that this principal is a member of.
        // we have to do this here so that there are records of
// entries in the role member audit logs and the domain
// entries are properly invalidated
List<PrincipalRole> roles = con.listPrincipalRoles(domainName, principalName);
// we want to check if we had any roles otherwise
// we don't want to update the domain mod timestamp
if (roles.isEmpty()) {
return;
}
for (PrincipalRole role : roles) {
final String roleName = role.getRoleName();
// process our delete role member operation
if (LOG.isDebugEnabled()) {
LOG.debug("removePrincipalFromDomainRoles: removing member {} from {}:role.{}",
principalName, domainName, roleName);
}
// we are going to ignore all errors here rather than
// rejecting the full operation
try {
con.deleteRoleMember(domainName, roleName, principalName, adminUser, auditRef);
} catch (ResourceException ex) {
LOG.error("removePrincipalFromDomainRoles: unable to remove {} from {}:role.{} - error {}",
principalName, domainName, roleName, ex.getMessage());
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
}
con.updateDomainModTimestamp(domainName);
}
void removePrincipalFromAllRoles(ObjectStoreConnection con, String principalName,
String adminUser, String auditRef) {
        // extract all the roles that this principal is a member of.
        // we have to do this here so that there are records of
// entries in the role member audit logs and the domain
// entries are properly invalidated
List<PrincipalRole> roles;
try {
roles = con.listPrincipalRoles(null, principalName);
} catch (ResourceException ex) {
// if there is no such principal then we have nothing to do
if (ex.getCode() == ResourceException.NOT_FOUND) {
return;
} else {
throw ex;
}
}
for (PrincipalRole role : roles) {
final String domainName = role.getDomainName();
final String roleName = role.getRoleName();
// process our delete role member operation
if (LOG.isDebugEnabled()) {
LOG.debug("removePrincipalFromAllRoles: removing member {} from {}:role.{}",
principalName, domainName, roleName);
}
// we are going to ignore all errors here rather than
// rejecting the full operation. our delete user will
// eventually remove all these principals
try {
con.deleteRoleMember(domainName, roleName, principalName, adminUser, auditRef);
} catch (ResourceException ex) {
LOG.error("removePrincipalFromAllRoles: unable to remove {} from {}:role.{} - error {}",
principalName, domainName, roleName, ex.getMessage());
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
}
}
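    /**
     * Delete the principal's personal domain, if one exists, along with all
     * of its sub-domains, and invalidate the corresponding cache entries.
     **/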
void removePrincipalDomains(ObjectStoreConnection con, String principalName) {
        // first we're going to retrieve the list of domains for
        // the given user
final String domainPrefix = principalName + ".";
List<String> subDomains = con.listDomains(domainPrefix, 0);
// first we're going to delete the user domain if
// one exists and then all the sub-domains. We're not
// going to fail the operation for these steps - only
// if the actual user is not deleted
con.deleteDomain(principalName);
cacheStore.invalidate(principalName);
for (String subDomain : subDomains) {
con.deleteDomain(subDomain);
cacheStore.invalidate(subDomain);
}
}
void executeDeleteDomainRoleMember(ResourceContext ctx, String domainName,
String memberName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// remove this user from all roles manually so that we
// can have an audit log record for each role
removePrincipalFromDomainRoles(con, domainName, memberName,
getPrincipalName(ctx), auditRef);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
memberName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteUser(ResourceContext ctx, String userName, String domainName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// remove all principal domains
removePrincipalDomains(con, domainName);
                // extract all principals that this user has - this would
                // include the user itself plus all services this user
                // has created in the personal domain + sub-domains
List<String> userSvcPrincipals = con.listPrincipals(domainName);
// remove this user from all roles manually so that we
// can have an audit log record for each role
final String adminPrincipal = getPrincipalName(ctx);
removePrincipalFromAllRoles(con, userName, adminPrincipal, auditRef);
for (String userSvcPrincipal : userSvcPrincipals) {
removePrincipalFromAllRoles(con, userSvcPrincipal, adminPrincipal, auditRef);
}
// finally delete the principal object. any roles that were
// left behind will be cleaned up from this operation
if (!con.deletePrincipal(userName, true)) {
throw ZMSUtils.notFoundError(caller + ": unable to delete user: "
+ userName, caller);
}
// audit log the request
auditLogRequest(ctx, userName, auditRef, caller, ZMSConsts.HTTP_DELETE,
userName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
ServiceIdentity getServiceIdentity(String domainName, String serviceName, boolean attrsOnly) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getServiceIdentity(con, domainName, serviceName, attrsOnly);
}
}
DomainTemplateList listDomainTemplates(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
DomainTemplateList domainTemplateList = new DomainTemplateList();
domainTemplateList.setTemplateNames(con.listDomainTemplates(domainName));
return domainTemplateList;
}
}
ServiceIdentity getServiceIdentity(ObjectStoreConnection con, String domainName,
String serviceName, boolean attrsOnly) {
ServiceIdentity service = con.getServiceIdentity(domainName, serviceName);
if (service != null && !attrsOnly) {
service.setPublicKeys(con.listPublicKeys(domainName, serviceName));
List<String> hosts = con.listServiceHosts(domainName, serviceName);
if (hosts != null && !hosts.isEmpty()) {
service.setHosts(hosts);
}
}
return service;
}
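    /**
     * Look up a service public key from the local domain cache. Returns null
     * if the domain, service, or key id is not present in the cache.
     **/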
PublicKeyEntry getPublicKeyFromCache(String domainName, String serviceName, String keyId) {
DataCache data = cacheStore.getIfPresent(domainName);
if (data == null) {
return null;
}
AthenzDomain athenzDomain = data.getAthenzDomain();
if (athenzDomain == null) {
return null;
}
List<ServiceIdentity> services = athenzDomain.getServices();
if (services == null) {
return null;
}
final String fullServiceName = ZMSUtils.serviceResourceName(domainName, serviceName);
for (ServiceIdentity service : services) {
if (fullServiceName.equals(service.getName())) {
List<PublicKeyEntry> publicKeys = service.getPublicKeys();
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
if (keyId.equals(publicKey.getId())) {
return publicKey;
}
}
}
break;
}
}
return null;
}
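    /**
     * Retrieve a service public key from the object store. If the store
     * reports itself unavailable, fall back to the local domain cache
     * before giving up.
     **/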
PublicKeyEntry getServicePublicKeyEntry(String domainName, String serviceName,
String keyId, boolean domainStateCheck) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getPublicKeyEntry(domainName, serviceName, keyId, domainStateCheck);
} catch (ResourceException ex) {
if (ex.getCode() != ResourceException.SERVICE_UNAVAILABLE) {
throw ex;
}
}
// if we got this far it means we couldn't get our public key
// from our DB store either due to timeout or communication
// error so we're going to see if we have the public key in
// our cache and use that for our requests
PublicKeyEntry keyEntry = getPublicKeyFromCache(domainName, serviceName, keyId);
if (keyEntry == null) {
throw new ResourceException(ResourceException.SERVICE_UNAVAILABLE,
"Unable to retrieve public key from DB store");
}
return keyEntry;
}
public ResourceAccessList getResourceAccessList(String principal, String action) {
        // this command takes quite a bit of time due to joining tables
        // and needs to be optimized. For now we'll configure it with a
        // default timeout of 30 minutes to avoid any issues
try (ObjectStoreConnection con = store.getConnection(true, false)) {
con.setOperationTimeout(1800);
return con.listResourceAccess(principal, action, zmsConfig.getUserDomain());
}
}
Domain getDomain(String domainName, boolean masterCopy) {
try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) {
return con.getDomain(domainName);
}
}
List<String> listDomains(String prefix, long modifiedSince) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listDomains(prefix, modifiedSince);
}
}
DomainList lookupDomainById(String account, int productId) {
DomainList domList = new DomainList();
try (ObjectStoreConnection con = store.getConnection(true, false)) {
String domain = con.lookupDomainById(account, productId);
if (domain != null) {
List<String> list = Collections.singletonList(domain);
domList.setNames(list);
}
}
return domList;
}
DomainList lookupDomainByAccount(String account) {
return lookupDomainById(account, 0);
}
DomainList lookupDomainByProductId(Integer productId) {
return lookupDomainById(null, productId);
}
DomainList lookupDomainByRole(String roleMember, String roleName) {
DomainList domList = new DomainList();
try (ObjectStoreConnection con = store.getConnection(true, false)) {
List<String> domains = con.lookupDomainByRole(roleMember, roleName);
if (domains != null) {
domList.setNames(domains);
}
}
return domList;
}
List<String> listRoles(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listRoles(domainName);
}
}
Membership getMembership(String domainName, String roleName, String principal,
long expiryTimestamp, boolean pending) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
Membership membership = con.getRoleMember(domainName, roleName, principal, expiryTimestamp, pending);
Timestamp expiration = membership.getExpiration();
            // need to check expiration and set isMember to false if expired
if (expiration != null && expiration.millis() < System.currentTimeMillis()) {
membership.setIsMember(false);
}
return membership;
}
}
DomainRoleMembers listDomainRoleMembers(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listDomainRoleMembers(domainName);
}
}
Role getRole(String domainName, String roleName, Boolean auditLog, Boolean expand, Boolean pending) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getRole(con, domainName, roleName, auditLog, expand, pending);
}
}
Role getRole(ObjectStoreConnection con, String domainName, String roleName,
Boolean auditLog, Boolean expand, Boolean pending) {
Role role = con.getRole(domainName, roleName);
if (role != null) {
if (role.getTrust() == null) {
// if we have no trust field specified then we need to
// retrieve our standard group role members
role.setRoleMembers(con.listRoleMembers(domainName, roleName, pending));
// still populate the members for old clients
role.setMembers(ZMSUtils.convertRoleMembersToMembers(
role.getRoleMembers()));
if (auditLog == Boolean.TRUE) {
role.setAuditLog(con.listRoleAuditLogs(domainName, roleName));
}
} else if (expand == Boolean.TRUE) {
// otherwise, if asked, let's expand the delegated
// membership and return the list of members
role.setRoleMembers(getDelegatedRoleMembers(con, domainName, role.getTrust(), roleName));
// still populate the members for old clients
role.setMembers(ZMSUtils.convertRoleMembersToMembers(role.getRoleMembers()));
}
}
return role;
}
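    /**
     * Expand a delegated (trust) role by evaluating the assume_role
     * assertions in the trust domain and collecting the members of all
     * matching roles, skipping duplicates.
     **/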
List<RoleMember> getDelegatedRoleMembers(ObjectStoreConnection con, final String domainName,
final String trustDomain, final String roleName) {
// verify that the domain and trust domain are not the same
if (domainName.equals(trustDomain)) {
return null;
}
// retrieve our trust domain
AthenzDomain domain = null;
try {
domain = getAthenzDomain(con, trustDomain);
} catch (ResourceException ex) {
LOG.error("unable to fetch domain {}: {}", trustDomain, ex.getMessage());
}
if (domain == null) {
return null;
}
        // we need to use a map keyed by member name since we might
        // be matching multiple assertions and we want to automatically
        // skip any duplicate members
Map<String, RoleMember> roleMembers = new HashMap<>();
// generate our full role name
String fullRoleName = ZMSUtils.roleResourceName(domainName, roleName);
// iterate through all policies to see which one has the
// assume_role assertion for the given role
for (Policy policy : domain.getPolicies()) {
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
for (Assertion assertion : assertions) {
if (!ZMSUtils.assumeRoleResourceMatch(fullRoleName, assertion)) {
continue;
}
String rolePattern = StringUtils.patternFromGlob(assertion.getRole());
for (Role role : domain.getRoles()) {
// make sure we have members before trying to match the name
List<RoleMember> members = role.getRoleMembers();
if (members == null || members.isEmpty()) {
continue;
}
if (!role.getName().matches(rolePattern)) {
continue;
}
for (RoleMember member : members) {
String memberName = member.getMemberName();
if (!roleMembers.containsKey(memberName)) {
roleMembers.put(memberName, member);
}
}
}
}
}
return new ArrayList<>(roleMembers.values());
}
Policy getPolicy(String domainName, String policyName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getPolicy(con, domainName, policyName);
}
}
Assertion getAssertion(String domainName, String policyName, Long assertionId) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getAssertion(domainName, policyName, assertionId);
}
}
void executePutAssertion(ResourceContext ctx, String domainName, String policyName,
Assertion assertion, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
                // now we need to verify our quota check
quotaCheck.checkPolicyAssertionQuota(con, domainName, policyName, caller);
// process our insert assertion. since this is a "single"
// operation, we are not using any transactions.
if (!con.insertAssertion(domainName, policyName, assertion)) {
throw ZMSUtils.requestError(caller + ": unable to insert assertion: " +
" to policy: " + policyName, caller);
}
// update our policy and domain time-stamps, and invalidate local cache entry
con.updatePolicyModTimestamp(domainName, policyName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogAssertion(auditDetails, assertion, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
policyName, auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteAssertion(ResourceContext ctx, String domainName, String policyName,
Long assertionId, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// process our delete assertion. since this is a "single"
// operation, we are not using any transactions.
if (!con.deleteAssertion(domainName, policyName, assertionId)) {
throw ZMSUtils.requestError(caller + ": unable to delete assertion: " +
assertionId + " from policy: " + policyName, caller);
}
// update our policy and domain time-stamps, and invalidate local cache entry
con.updatePolicyModTimestamp(domainName, policyName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
final String auditDetails = "{\"policy\": \"" + policyName +
"\", \"assertionId\": \"" + assertionId + "\"}";
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
policyName, auditDetails);
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
List<String> listEntities(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listEntities(domainName);
}
}
Entity getEntity(String domainName, String entityName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getEntity(domainName, entityName);
}
}
Policy getPolicy(ObjectStoreConnection con, String domainName, String policyName) {
Policy policy = con.getPolicy(domainName, policyName);
if (policy != null) {
policy.setAssertions(con.listAssertions(domainName, policyName));
}
return policy;
}
List<String> listPolicies(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listPolicies(domainName, null);
}
}
List<String> listServiceIdentities(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listServiceIdentities(domainName);
}
}
void executePutDomainMeta(ResourceContext ctx, String domainName, DomainMeta meta,
final String systemAttribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_DOMAIN);
// now process the request. first we're going to make a
// copy of our domain
Domain updatedDomain = new Domain()
.setName(domain.getName())
.setEnabled(domain.getEnabled())
.setId(domain.getId())
.setAuditEnabled(domain.getAuditEnabled())
.setDescription(domain.getDescription())
.setOrg(domain.getOrg())
.setApplicationId(domain.getApplicationId())
.setAccount(domain.getAccount())
.setYpmId(domain.getYpmId())
.setCertDnsDomain(domain.getCertDnsDomain())
.setMemberExpiryDays(domain.getMemberExpiryDays())
.setServiceExpiryDays(domain.getServiceExpiryDays())
.setTokenExpiryMins(domain.getTokenExpiryMins())
.setRoleCertExpiryMins(domain.getRoleCertExpiryMins())
.setServiceCertExpiryMins(domain.getServiceCertExpiryMins())
.setSignAlgorithm(domain.getSignAlgorithm());
// then we're going to apply the updated fields
// from the given object
if (systemAttribute != null) {
updateSystemMetaFields(updatedDomain, systemAttribute, deleteAllowed, meta);
} else {
updateDomainMetaFields(updatedDomain, meta);
}
con.updateDomain(updatedDomain);
con.commitChanges();
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogDomain(auditDetails, updatedDomain);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
                // if the domain member expiry date has changed then we're going
                // to process all the members in the domain and update the
                // expiration date accordingly
updateDomainMembersExpiration(ctx, con, domain, updatedDomain, auditRef, caller);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
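    /**
     * If the domain level member/service expiry days have been reduced,
     * walk through all group roles in the domain and cap any member
     * expiration dates that exceed the new limits. Roles with their own
     * expiry settings and delegated roles are skipped.
     **/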
void updateDomainMembersExpiration(ResourceContext ctx, ObjectStoreConnection con, Domain domain,
Domain updatedDomain, String auditRef, String caller) {
// we only need to process the domain role members if the new expiration
// is more restrictive than what we had before
boolean userMemberExpiryDayReduced = isNumOfDaysReduced(domain.getMemberExpiryDays(),
updatedDomain.getMemberExpiryDays());
boolean serviceMemberExpiryDayReduced = isNumOfDaysReduced(domain.getServiceExpiryDays(),
updatedDomain.getServiceExpiryDays());
if (!userMemberExpiryDayReduced && !serviceMemberExpiryDayReduced) {
return;
}
AthenzDomain athenzDomain;
try {
athenzDomain = getAthenzDomain(con, domain.getName());
} catch (ResourceException ex) {
LOG.error("unable to fetch domain {}: {}", domain.getName(), ex.getMessage());
return;
}
long userExpiryMillis = userMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedDomain.getMemberExpiryDays(), TimeUnit.DAYS) : 0;
long serviceExpiryMillis = serviceMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedDomain.getServiceExpiryDays(), TimeUnit.DAYS) : 0;
Timestamp userExpiration = Timestamp.fromMillis(userExpiryMillis);
Timestamp serviceExpiration = Timestamp.fromMillis(serviceExpiryMillis);
final String principal = getPrincipalName(ctx);
for (Role role : athenzDomain.getRoles()) {
// if the role already has a specific expiry date set then we
// will automatically skip this role
if (role.getMemberExpiryDays() != null || role.getServiceExpiryDays() != null) {
continue;
}
// if it's a delegated role then we have nothing to do
if (role.getTrust() != null && !role.getTrust().isEmpty()) {
continue;
}
// if no role members, then there is nothing to do
final List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers == null || roleMembers.isEmpty()) {
continue;
}
// process our role members and if there were any changes processed then update
// our role and domain time-stamps, and invalidate local cache entry
final String roleName = AthenzUtils.extractRoleName(role.getName());
List<RoleMember> roleMembersWithUpdatedDueDates = getRoleMembersWithUpdatedDueDates(roleMembers,
userExpiration, userExpiryMillis, serviceExpiration, serviceExpiryMillis,
null, 0, null, 0, null);
if (insertRoleMembers(ctx, con, roleMembersWithUpdatedDueDates, domain.getName(),
roleName, principal, auditRef, caller)) {
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domain.getName(), roleName);
con.updateDomainModTimestamp(domain.getName());
cacheStore.invalidate(domain.getName());
}
}
}
void updateDomainMetaFields(Domain domain, DomainMeta meta) {
domain.setApplicationId(meta.getApplicationId());
domain.setDescription(meta.getDescription());
if (meta.getMemberExpiryDays() != null) {
domain.setMemberExpiryDays(meta.getMemberExpiryDays());
}
if (meta.getServiceExpiryDays() != null) {
domain.setServiceExpiryDays(meta.getServiceExpiryDays());
}
if (meta.getRoleCertExpiryMins() != null) {
domain.setRoleCertExpiryMins(meta.getRoleCertExpiryMins());
}
if (meta.getServiceCertExpiryMins() != null) {
domain.setServiceCertExpiryMins(meta.getServiceCertExpiryMins());
}
if (meta.getTokenExpiryMins() != null) {
domain.setTokenExpiryMins(meta.getTokenExpiryMins());
}
if (meta.getSignAlgorithm() != null) {
domain.setSignAlgorithm(meta.getSignAlgorithm());
}
}
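    /**
     * A system meta attribute can be modified if the caller is authorized
     * to delete the attribute, if no value is currently set, or if the new
     * value is identical to the existing one.
     **/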
boolean isDeleteSystemMetaAllowed(boolean deleteAllowed, Object oldValue, Object newValue) {
// if authorized or old value is not set, then there is
// no need to check any value
if (deleteAllowed || oldValue == null) {
return true;
}
        // since our old value is not null, we will only allow
        // the update if the new value is identical
        return newValue != null && oldValue.equals(newValue);
}
void updateSystemMetaFields(Domain domain, final String attribute, boolean deleteAllowed,
DomainMeta meta) {
final String caller = "putdomainsystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_ACCOUNT:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getAccount(), meta.getAccount())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setAccount(meta.getAccount());
break;
case ZMSConsts.SYSTEM_META_PRODUCT_ID:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getYpmId(), meta.getYpmId())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setYpmId(meta.getYpmId());
break;
case ZMSConsts.SYSTEM_META_CERT_DNS_DOMAIN:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getCertDnsDomain(), meta.getCertDnsDomain())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setCertDnsDomain(meta.getCertDnsDomain());
break;
case ZMSConsts.SYSTEM_META_ORG:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getOrg(), meta.getOrg())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setOrg(meta.getOrg());
break;
case ZMSConsts.SYSTEM_META_AUDIT_ENABLED:
domain.setAuditEnabled(meta.getAuditEnabled());
break;
case ZMSConsts.SYSTEM_META_ENABLED:
domain.setEnabled(meta.getEnabled());
break;
default:
throw ZMSUtils.requestError("unknown system meta attribute: " + attribute, caller);
}
}
void updateRoleSystemMetaFields(Role role, final String attribute, boolean deleteAllowed, RoleSystemMeta meta) {
final String caller = "putrolesystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_AUDIT_ENABLED:
role.setAuditEnabled(meta.getAuditEnabled());
break;
default:
throw ZMSUtils.requestError("unknown role system meta attribute: " + attribute, caller);
}
}
void updateServiceIdentitySystemMetaFields(ServiceIdentity service, final String attribute,
boolean deleteAllowed, ServiceIdentitySystemMeta meta) {
final String caller = "putserviceidentitysystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_PROVIDER_ENDPOINT:
service.setProviderEndpoint(meta.getProviderEndpoint());
break;
default:
throw ZMSUtils.requestError("unknown service system meta attribute: " + attribute, caller);
}
}
void executePutDomainTemplate(ResourceContext ctx, String domainName, DomainTemplate domainTemplate,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TEMPLATE);
                // go through our list of templates and add the specified
                // roles and policies to our domain
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"add-templates\": ");
boolean firstEntry = true;
for (String templateName : domainTemplate.getTemplateNames()) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
if (!addSolutionTemplate(con, domainName, templateName, getPrincipalName(ctx),
domainTemplate.getParams(), auditRef, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put domain templates: " + domainName, caller);
}
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteDomainTemplate(ResourceContext ctx, String domainName, String templateName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TEMPLATE);
                // go through the given template and delete its
                // roles and policies from our domain
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"templates\": ");
Template template = zmsConfig.getServerSolutionTemplates().get(templateName);
if (!deleteSolutionTemplate(con, domainName, templateName, template, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to delete domain template: " + domainName, caller);
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
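    /**
     * Apply the given solution template to the domain: process its roles,
     * policies and services (inserting new objects or updating existing
     * ones), record the template name against the domain if not already
     * listed, and bump the stored template version to the latest one.
     **/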
boolean addSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName,
String admin, List<TemplateParam> templateParams, String auditRef, StringBuilder auditDetails) {
auditDetails.append("{\"name\": \"").append(templateName).append('\"');
// we have already verified that our template is valid but
// we'll just double check to make sure it's not null
Template template = zmsConfig.getServerSolutionTemplates().get(templateName);
if (template == null) {
auditDetails.append("}");
return true;
}
boolean firstEntry = true;
// iterate through roles in the list.
// When adding a template, if the role does not exist in our domain
// then insert it otherwise only apply the changes to the member list.
List<Role> templateRoles = template.getRoles();
if (templateRoles != null) {
for (Role role : templateRoles) {
Role templateRole = updateTemplateRole(role, domainName, templateParams);
String roleName = ZMSUtils.removeDomainPrefix(templateRole.getName(),
domainName, ROLE_PREFIX);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-role\": ");
if (!processRole(con, originalRole, domainName, roleName, templateRole,
admin, auditRef, true, auditDetails)) {
return false;
}
}
}
// iterate through policies in the list.
// When adding a template, if the policy does not exist in our domain
// then insert it otherwise only apply the changes to the assertions
List<Policy> templatePolicies = template.getPolicies();
if (templatePolicies != null) {
for (Policy policy : templatePolicies) {
Policy templatePolicy = updateTemplatePolicy(policy, domainName, templateParams);
String policyName = ZMSUtils.removeDomainPrefix(templatePolicy.getName(),
domainName, POLICY_PREFIX);
// retrieve our original policy
Policy originalPolicy = getPolicy(con, domainName, policyName);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-policy\": ");
if (!processPolicy(con, originalPolicy, domainName, policyName, templatePolicy,
true, auditDetails)) {
return false;
}
}
}
// iterate through service identities in the list.
// When adding a template, if the service identity does not exist in our domain
// then insert it otherwise only apply the changes
List<ServiceIdentity> templateServiceIdentities = template.getServices();
if (templateServiceIdentities != null) {
for (ServiceIdentity serviceIdentity : templateServiceIdentities) {
ServiceIdentity templateServiceIdentity = updateTemplateServiceIdentity(
serviceIdentity, domainName, templateParams);
String serviceIdentityName = ZMSUtils.removeDomainPrefixForService(
templateServiceIdentity.getName(), domainName);
// retrieve our original service
ServiceIdentity originalServiceIdentity = getServiceIdentity(con, domainName,
serviceIdentityName, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-service\": ");
if (!processServiceIdentity(con, originalServiceIdentity, domainName,
serviceIdentityName, templateServiceIdentity, true, auditDetails)) {
return false;
}
}
}
// if adding a template, only add if it is not in our current list
// check to see if the template is already listed for the domain
List<String> currentTemplateList = con.listDomainTemplates(domainName);
if (!currentTemplateList.contains(templateName)) {
con.insertDomainTemplate(domainName, templateName, null);
}
        // on both insert and update of a template, bump up the template
        // version to the latest available version
if (template.getMetadata().getLatestVersion() != null) {
con.updateDomainTemplate(domainName, templateName, template.getMetadata());
}
auditDetails.append("}");
return true;
}
boolean deleteSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName,
Template template, StringBuilder auditDetails) {
// currently there is no support for dynamic templates since the
// DELETE request has no payload and we can't pass our parameters
auditDetails.append("{\"name\": \"").append(templateName).append('\"');
// we have already verified that our template is valid but
// we'll just double check to make sure it's not null
if (template == null) {
auditDetails.append("}");
return true;
}
boolean firstEntry = true;
// iterate through roles in the list and delete the role
List<Role> templateRoles = template.getRoles();
if (templateRoles != null) {
for (Role role : templateRoles) {
String roleName = ZMSUtils.removeDomainPrefix(role.getName(),
TEMPLATE_DOMAIN_NAME, ROLE_PREFIX);
con.deleteRole(domainName, roleName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-role\": \"").append(roleName).append('\"');
}
}
// iterate through policies in the list and delete the policy
List<Policy> templatePolicies = template.getPolicies();
if (templatePolicies != null) {
for (Policy policy : templatePolicies) {
String policyName = ZMSUtils.removeDomainPrefix(policy.getName(),
TEMPLATE_DOMAIN_NAME, POLICY_PREFIX);
con.deletePolicy(domainName, policyName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-policy\": \"").append(policyName).append('\"');
}
}
// iterate through services in the list and delete the service
List<ServiceIdentity> templateServices = template.getServices();
if (templateServices != null) {
for (ServiceIdentity serviceIdentity : templateServices) {
String serviceName = ZMSUtils.removeDomainPrefixForService(serviceIdentity.getName(),
TEMPLATE_DOMAIN_NAME);
con.deleteServiceIdentity(domainName, serviceName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-service\": \"").append(serviceName).append('\"');
}
}
// delete the template from the current list
con.deleteDomainTemplate(domainName, templateName, null);
auditDetails.append("}");
return true;
}
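    /**
     * Generate a domain-specific copy of the given template role by replacing
     * the template domain name and any _param_ placeholders in the role name
     * and its member names with the given domain name and parameter values.
     **/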
Role updateTemplateRole(Role role, String domainName, List<TemplateParam> params) {
// first process our given role name and carry out any
// requested substitutions
String templateRoleName = role.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templateRoleName = templateRoleName.replace(paramKey, param.getValue());
}
}
Role templateRole = new Role()
.setName(templateRoleName)
.setTrust(role.getTrust());
List<RoleMember> roleMembers = role.getRoleMembers();
List<RoleMember> newMembers = new ArrayList<>();
if (roleMembers != null && !roleMembers.isEmpty()) {
for (RoleMember roleMember : roleMembers) {
RoleMember newRoleMember = new RoleMember();
// process our role members for any requested substitutions
String memberName = roleMember.getMemberName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
memberName = memberName.replace(paramKey, param.getValue());
}
}
newRoleMember.setMemberName(memberName);
newRoleMember.setExpiration(roleMember.getExpiration());
newMembers.add(newRoleMember);
}
}
templateRole.setRoleMembers(newMembers);
return templateRole;
}
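    /**
     * Generate a domain-specific copy of the given template policy by replacing
     * the template domain name and any _param_ placeholders in the policy name
     * and in each assertion's role and resource values.
     **/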
Policy updateTemplatePolicy(Policy policy, String domainName, List<TemplateParam> params) {
// first process our given role name and carry out any
// requested substitutions
String templatePolicyName = policy.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templatePolicyName = templatePolicyName.replace(paramKey, param.getValue());
}
}
Policy templatePolicy = new Policy().setName(templatePolicyName);
List<Assertion> assertions = policy.getAssertions();
List<Assertion> newAssertions = new ArrayList<>();
if (assertions != null && !assertions.isEmpty()) {
for (Assertion assertion : assertions) {
Assertion newAssertion = new Assertion();
newAssertion.setAction(assertion.getAction());
newAssertion.setEffect(assertion.getEffect());
// process our assertion resource and role for any requested substitutions
String resource = assertion.getResource().replace(TEMPLATE_DOMAIN_NAME, domainName);
String role = assertion.getRole().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
resource = resource.replace(paramKey, param.getValue());
role = role.replace(paramKey, param.getValue());
}
}
newAssertion.setResource(resource);
newAssertion.setRole(role);
newAssertions.add(newAssertion);
}
}
templatePolicy.setAssertions(newAssertions);
return templatePolicy;
}
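    /**
     * Generate a domain-specific copy of the given template service identity
     * by replacing the template domain name and any _param_ placeholders in
     * the service name and copying over the remaining service attributes.
     **/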
ServiceIdentity updateTemplateServiceIdentity(ServiceIdentity serviceIdentity,
String domainName, List<TemplateParam> params) {
String templateServiceName = serviceIdentity.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templateServiceName = templateServiceName.replace(paramKey, param.getValue());
}
}
ServiceIdentity templateServiceIdentity = new ServiceIdentity().setName(templateServiceName);
templateServiceIdentity.setDescription(serviceIdentity.getDescription());
templateServiceIdentity.setExecutable(serviceIdentity.getExecutable());
templateServiceIdentity.setGroup(serviceIdentity.getGroup());
templateServiceIdentity.setUser(serviceIdentity.getUser());
templateServiceIdentity.setProviderEndpoint(serviceIdentity.getProviderEndpoint());
List<PublicKeyEntry> publicKeyEntries = serviceIdentity.getPublicKeys();
List<PublicKeyEntry> newPublicKeyEntries = new ArrayList<>();
if (publicKeyEntries != null && !publicKeyEntries.isEmpty()) {
for (PublicKeyEntry publicKeyEntry : publicKeyEntries) {
PublicKeyEntry newPublicKeyEntry = new PublicKeyEntry();
newPublicKeyEntry.setId(publicKeyEntry.getId());
newPublicKeyEntry.setKey(publicKeyEntry.getKey());
newPublicKeyEntries.add(newPublicKeyEntry);
}
}
templateServiceIdentity.setPublicKeys(newPublicKeyEntries);
List<String> hosts = serviceIdentity.getHosts();
if (hosts != null) {
templateServiceIdentity.setHosts(new ArrayList<>(hosts));
}
return templateServiceIdentity;
}
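    /**
     * Create the tenant admin role and policy in the tenant domain, if they do
     * not already exist, granting assume_role on the provider's admin role to
     * both the domain admins and the tenant admin role, and allowing the tenant
     * admin role to provision new resource groups in the domain.
     **/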
void setupTenantAdminPolicy(String tenantDomain, String provSvcDomain,
String provSvcName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, provSvcDomain + "." + provSvcName, AUDIT_TYPE_TENANCY);
String domainAdminRole = ZMSUtils.roleResourceName(tenantDomain, ZMSConsts.ADMIN_ROLE_NAME);
String serviceRoleResourceName = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, null) + ZMSConsts.ADMIN_ROLE_NAME;
// our tenant admin role/policy name
final String tenancyResource = "tenancy." + provSvcDomain + '.' + provSvcName;
String adminName = tenancyResource + ".admin";
String tenantAdminRole = ZMSUtils.roleResourceName(tenantDomain, adminName);
// tenant admin role - if it already exists then we skip it
// by default it has no members.
if (con.getRole(tenantDomain, adminName) == null) {
con.insertRole(tenantDomain, new Role().setName(tenantAdminRole));
}
// tenant admin policy - check to see if this already exists. If it does
// then we don't have anything to do
if (con.getPolicy(tenantDomain, adminName) == null) {
Policy adminPolicy = new Policy().setName(ZMSUtils.policyResourceName(tenantDomain, adminName));
con.insertPolicy(tenantDomain, adminPolicy);
// we are going to create 2 assertions - one for the domain admin role
// and another for the tenant admin role
Assertion assertion = new Assertion().setRole(domainAdminRole)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
assertion = new Assertion().setRole(tenantAdminRole)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
// the tenant admin role must have the capability to provision
// new resource groups in the domain which requires update
// action capability on resource tenancy.<prov_domain>.<prov_svc>
String tenantResourceName = tenantDomain + ":" + tenancyResource;
assertion = new Assertion().setRole(tenantAdminRole)
.setResource(tenantResourceName).setAction(ZMSConsts.ACTION_UPDATE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
}
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
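    /**
     * Create the trusted roles and corresponding policies in the provider
     * domain for the given tenant resource group role/action pairs.
     **/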
void executePutTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName, String tenantDomain,
String resourceGroup, List<TenantRoleAction> roles, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, resourceGroup);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"put-tenant-roles\": [");
boolean firstEntry = true;
for (TenantRoleAction ra : roles) {
String tenantRole = ra.getRole();
String tenantAction = ra.getAction();
String trustedRole = trustedRolePrefix + tenantRole;
String trustedName = trustedRole.substring((provSvcDomain + AuthorityConsts.ROLE_SEP).length());
Role role = new Role().setName(trustedRole).setTrust(tenantDomain);
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": add trusted Role to domain " + provSvcDomain +
": " + trustedRole + " -> " + role);
}
// retrieve our original role in case one exists
Role originalRole = getRole(con, provSvcDomain, trustedName, false, false, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"role\": ");
if (!processRole(con, originalRole, provSvcDomain, trustedName, role,
getPrincipalName(ctx), auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + trustedRole, caller);
}
String policyResourceName = ZMSUtils.policyResourceName(provSvcDomain, trustedName);
final String resourceName = provSvcDomain + ":service." +
ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup) + '*';
List<Assertion> assertions = Collections.singletonList(
new Assertion().setRole(trustedRole)
.setResource(resourceName)
.setAction(tenantAction));
Policy policy = new Policy().setName(policyResourceName).setAssertions(assertions);
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": add trust policy to domain " + provSvcDomain +
": " + trustedRole + " -> " + policy);
}
// retrieve our original policy
Policy originalPolicy = getPolicy(con, provSvcDomain, trustedName);
// now process the request
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, originalPolicy, provSvcDomain, trustedName, policy, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller);
}
auditDetails.append('}');
}
// update our domain time-stamp and save changes
saveChanges(con, provSvcDomain);
// audit log the request
auditLogRequest(ctx, provSvcDomain, auditRef, caller, ZMSConsts.HTTP_PUT,
tenantDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
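    /**
     * Create or update the given provider role in the tenant domain along with
     * a tenancy policy granting assume_role on the corresponding trusted role
     * in the provider domain. Existing role members and policy assertions are
     * preserved.
     **/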
void addAssumeRolePolicy(ObjectStoreConnection con, String rolePrefix,
String trustedRolePrefix, String role, List<RoleMember> roleMembers,
String tenantDomain, String admin, String auditRef,
StringBuilder auditDetails, String caller) {
// first create the role in the domain. We're going to create it
// only if the role does not already exist
String roleName = rolePrefix + role;
String roleResourceName = ZMSUtils.roleResourceName(tenantDomain, roleName);
// retrieve our original role in case one exists
Role originalRole = getRole(con, tenantDomain, roleName, false, false, false);
// we need to add the original role members to the new one
if (originalRole != null && originalRole.getRoleMembers() != null) {
roleMembers.addAll(originalRole.getRoleMembers());
}
// now process the request
Role roleObj = new Role().setName(roleResourceName).setRoleMembers(roleMembers);
auditDetails.append("{\"role\": ");
if (!processRole(con, originalRole, tenantDomain, roleName, roleObj,
admin, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + roleName, caller);
}
// now create the corresponding policy. We're going to create it
// only if the policy does not exist otherwise we'll just
// add a new assertion
String policyName = "tenancy." + roleName;
String policyResourceName = ZMSUtils.policyResourceName(tenantDomain, policyName);
String serviceRoleResourceName = trustedRolePrefix + role;
Assertion assertion = new Assertion().setRole(roleResourceName)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
if (LOG.isInfoEnabled()) {
LOG.info("executePutProviderRoles: ---- ASSUME_ROLE policyName is " + policyName);
}
// retrieve our original policy
Policy originalPolicy = getPolicy(con, tenantDomain, policyName);
// we need to add the original policy assertions to the new one
List<Assertion> newAssertions = new ArrayList<>();
if (originalPolicy != null && originalPolicy.getAssertions() != null) {
newAssertions.addAll(originalPolicy.getAssertions());
}
        // if our new assertion is not already in the list then we'll add it
if (!newAssertions.contains(assertion)) {
newAssertions.add(assertion);
}
// now process the request
Policy assumeRolePolicy = new Policy().setName(policyResourceName).setAssertions(newAssertions);
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, originalPolicy, tenantDomain, policyName, assumeRolePolicy,
false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " +
assumeRolePolicy.getName(), caller);
}
auditDetails.append('}');
}
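    /**
     * Set up the provider roles in the tenant domain for the given resource
     * group along with assume_role policies for the corresponding trusted
     * roles in the provider domain, adding the caller as a member of each role.
     **/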
void executePutProviderRoles(ResourceContext ctx, String tenantDomain, String provSvcDomain,
String provSvcName, String resourceGroup, List<String> roles, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
                // we're going to create a separate role for each one of the tenant roles returned
// based on its action and set the caller as a member in each role
final String principalName = getPrincipalName(ctx);
// now set up the roles and policies for all the provider roles returned.
final String rolePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain,
provSvcName, resourceGroup);
final String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, resourceGroup);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"put-provider-roles\": [");
boolean firstEntry = true;
for (String role : roles) {
// we need to create a new object for each role since the list is updated
// in case the role already has existing members, but we don't want to
// add those members to other roles in our list
List<RoleMember> roleMembers = new ArrayList<>();
if (principalName != null) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(principalName);
roleMembers.add(roleMember);
}
role = role.toLowerCase();
if (LOG.isInfoEnabled()) {
LOG.info("executePutProviderRoles: provision ASSUME_ROLE policy for access remote role in "
+ provSvcDomain + "." + provSvcName + ": " + resourceGroup + "." + role);
}
firstEntry = auditLogSeparator(auditDetails, firstEntry);
addAssumeRolePolicy(con, rolePrefix, trustedRolePrefix, role, roleMembers,
tenantDomain, principalName, auditRef, auditDetails, caller);
}
auditDetails.append("]}");
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_PUT,
provSvcDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
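    /**
     * Remove the tenancy policies for the given provider service and resource
     * group from the tenant domain along with any provider roles that are no
     * longer referenced by any policy.
     **/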
void executeDeleteTenancy(ResourceContext ctx, String tenantDomain, String provSvcDomain,
String provSvcName, String resourceGroup, String auditRef, String caller) {
// create list of policies and delete them from the tenant domain
// have to get all policies that match "tenant.<provider>.*"
// ex: tenancy.weather.storage.admin
String rnamePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain, provSvcName,
resourceGroup);
final String pnamePrefix = "tenancy." + rnamePrefix;
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
// first let's process and remove any policies that start with our
// provider prefix
List<String> pnames = con.listPolicies(tenantDomain, null);
for (String pname : pnames) {
if (!validResourceGroupObjectToDelete(pname, pnamePrefix)) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore policy " + pname);
}
continue;
}
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": --delete policy " + pname);
}
con.deletePolicy(tenantDomain, pname);
}
// now we're going to find any roles that have the provider prefix as
// well but we're going to be careful about removing them. We'll check
// and if we have no more policies referencing them then we'll remove
List<String> rnames = con.listRoles(tenantDomain);
for (String rname : rnames) {
if (!validResourceGroupObjectToDelete(rname, rnamePrefix)) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore role " + rname);
}
continue;
}
if (!con.listPolicies(tenantDomain, rname).isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore role " + rname + " due to active references");
}
continue;
}
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": --delete role " + rname);
}
con.deleteRole(tenantDomain, rname);
}
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE,
ZMSUtils.entityResourceName(provSvcDomain, provSvcName), null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
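    /**
     * Return true if the given role/policy name starts with the given resource
     * group prefix and is followed only by a simple action name.
     **/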
boolean validResourceGroupObjectToDelete(String name, String prefix) {
if (!name.startsWith(prefix)) {
return false;
}
// the suffix must be the action which should only be
// simple-name thus it cannot contain any more .'s
        // otherwise we might mistakenly match substring
        // resource groups - e.g:
        // system.engine and system.engine.test
return (name.indexOf('.', prefix.length()) == -1);
}
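    /**
     * Delete the trusted roles and corresponding policies in the provider
     * domain that were created for the given tenant domain and resource group.
     **/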
void executeDeleteTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName,
String tenantDomain, String resourceGroup, String auditRef, String caller) {
// look for this tenants roles, ex: storage.tenant.sports.reader
String rolePrefix = ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup);
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
// find roles and policies matching the prefix
List<String> rnames = con.listRoles(provSvcDomain);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"tenant-roles\": [");
boolean firstEntry = true;
for (String rname : rnames) {
if (isTrustRoleForTenant(con, provSvcDomain, rname, rolePrefix,
resourceGroup, tenantDomain)) {
                        // good, it's exactly what we are looking for
con.deleteRole(provSvcDomain, rname);
con.deletePolicy(provSvcDomain, rname);
firstEntry = auditLogString(auditDetails, rname, firstEntry);
}
}
auditDetails.append("]}");
// update our domain time-stamp and save changes
saveChanges(con, provSvcDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE,
provSvcDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
boolean isTrustRoleForTenant(ObjectStoreConnection con, String provSvcDomain, String roleName,
String rolePrefix, String resourceGroup, String tenantDomain) {
// first make sure the role name starts with the given prefix
if (!isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain)) {
return false;
}
Role role = con.getRole(provSvcDomain, roleName);
if (role == null) {
return false;
}
// ensure it is a trust role for the tenant
String trustDom = role.getTrust();
return trustDom != null && trustDom.equals(tenantDomain);
}
boolean isTrustRoleForTenant(String provSvcDomain, String roleName, String rolePrefix,
String resourceGroup, String tenantDomain) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return isTrustRoleForTenant(con, provSvcDomain, roleName, rolePrefix, resourceGroup, tenantDomain);
}
}
boolean isTenantRolePrefixMatch(String roleName, String rolePrefix, String resourceGroup,
String tenantDomain) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain);
}
}
boolean isTenantRolePrefixMatch(ObjectStoreConnection con, String roleName, String rolePrefix,
String resourceGroup, String tenantDomain) {
if (LOG.isDebugEnabled()) {
LOG.debug("isTenantRolePrefixMatch: role-name=" + roleName + ", role-prefix=" +
rolePrefix + ", reosurce-group=" + resourceGroup + ", tenant-domain=" + tenantDomain);
}
// first make sure the role name starts with the given prefix
if (!roleName.startsWith(rolePrefix)) {
return false;
}
// if we're dealing with a resource group then we need
// to make sure we're not going to match a substring
// resource group. Since we expect to see a SimpleName
// action after the name, if we get another '.' then
        // we're dealing with a substring so the role does
        // not match the expected format
if (resourceGroup != null) {
return (roleName.indexOf('.', rolePrefix.length()) == -1);
}
// otherwise we're going to split the remaining value
// into components. If we have 2 components then we'll
// check if we have a domain for the first component
// if we don't then it's a resource group and as such
// it can be removed otherwise, we'll leave it alone
String[] comps = roleName.substring(rolePrefix.length()).split("\\.");
if (comps.length == 2) {
// check to see if we have a subdomain - if we do then
// we're not going to include this role as we don't know
            // for sure if this is for a resource group or not
String subDomain = tenantDomain + "." + comps[0];
if (LOG.isDebugEnabled()) {
LOG.debug("isTenantRolePrefixMatch: verifying tenant subdomain: " + subDomain);
}
return con.getDomain(subDomain) == null;
} else {
// if we have more than 2 subcomponents then we're
// definitely not dealing with resource groups
return comps.length <= 2;
}
}
public AthenzDomain getAthenzDomain(final String domainName, boolean masterCopy) {
try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) {
return getAthenzDomain(con, domainName);
}
}
AthenzDomain getAthenzDomain(ObjectStoreConnection con, final String domainName) {
        // first check to see if our data is in the cache
AthenzDomain athenzDomain = getAthenzDomainFromCache(con, domainName);
if (athenzDomain != null) {
return athenzDomain;
}
athenzDomain = con.getAthenzDomain(domainName);
setMembersInDomain(athenzDomain);
DataCache dataCache = new DataCache(athenzDomain,
athenzDomain.getDomain().getModified().millis());
cacheStore.put(domainName, dataCache);
return athenzDomain;
}
private void setMembersInDomain(AthenzDomain athenzDomain) {
List<Role> roleList = athenzDomain.getRoles();
if (roleList != null) {
for (Role role: roleList) {
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers != null) {
List<String> members = role.getMembers();
if (members == null) {
members = new ArrayList<>();
role.setMembers(members);
}
for (RoleMember roleMember: roleMembers) {
members.add(roleMember.getMemberName());
}
}
}
}
}
DomainMetaList listModifiedDomains(long modifiedSince) {
// since this is the operation executed by ZTS servers to
// retrieve latest domain changes, we're going to use
        // the read-write store as opposed to the read-only store to
// get our up-to-date data
try (ObjectStoreConnection con = store.getConnection(true, true)) {
return con.listModifiedDomains(modifiedSince);
}
}
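    // append a comma separator to the audit details unless this is the first entry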
boolean auditLogSeparator(StringBuilder auditDetails, boolean firstEntry) {
if (!firstEntry) {
auditDetails.append(',');
}
// regardless of the current state, the new state is no
// longer the first entry so we return false
return false;
}
void auditLogStrings(StringBuilder auditDetails, String label, Collection<String> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogString(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogString(StringBuilder auditDetails, String value, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append('\"').append(value).append('\"');
return firstEntry;
}
void auditLogRoleMembers(StringBuilder auditDetails, String label,
Collection<RoleMember> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (RoleMember value : values) {
firstEntry = auditLogRoleMember(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogRoleMember(StringBuilder auditDetails, RoleMember roleMember, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"member\": \"").append(roleMember.getMemberName()).append('"');
if (roleMember.getExpiration() != null) {
auditDetails.append(", \"expiration\": \"").append(roleMember.getExpiration().toString()).append('"');
}
auditDetails.append(", \"approved\": ");
auditDetails.append(roleMember.getApproved() == Boolean.FALSE ? "false" : "true");
auditDetails.append(", \"system-disabled\": ");
auditDetails.append(roleMember.getSystemDisabled() == null ? 0 : roleMember.getSystemDisabled());
auditDetails.append("}");
return firstEntry;
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label,
List<PublicKeyEntry> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (PublicKeyEntry value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values,
Map<String, PublicKeyEntry> publicKeysMap) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, publicKeysMap.get(value), firstEntry);
}
auditDetails.append(']');
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogPublicKeyEntry(StringBuilder auditDetails, PublicKeyEntry publicKey, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"key\": \"").append(publicKey.getKey())
.append("\", \"id\": \"").append(publicKey.getId()).append("\"}");
return firstEntry;
}
boolean auditLogPublicKeyEntry(StringBuilder auditDetails, String publicKeyId, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"id\": \"").append(publicKeyId).append("\"}");
return firstEntry;
}
void auditLogAssertions(StringBuilder auditDetails, String label, Collection<Assertion> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (Assertion value : values) {
firstEntry = auditLogAssertion(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogAssertion(StringBuilder auditDetails, Assertion assertion, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
String assertionEffect = "ALLOW";
if (assertion.getEffect() != null) {
assertionEffect = assertion.getEffect().toString();
}
auditDetails.append("{\"role\": \"").append(assertion.getRole())
.append("\", \"action\": \"").append(assertion.getAction())
.append("\", \"effect\": \"").append(assertionEffect)
.append("\", \"resource\": \"").append(assertion.getResource())
.append("\"}");
return firstEntry;
}
void auditLogDomain(StringBuilder auditDetails, Domain domain) {
auditDetails.append("{\"description\": \"").append(domain.getDescription())
.append("\", \"org\": \"").append(domain.getOrg())
.append("\", \"auditEnabled\": \"").append(domain.getAuditEnabled())
.append("\", \"enabled\": \"").append(domain.getEnabled())
.append("\", \"account\": \"").append(domain.getAccount())
.append("\", \"acctId\": \"").append(domain.getApplicationId())
.append("\", \"ypmid\": \"").append(domain.getYpmId())
.append("\", \"id\": \"").append(domain.getId())
.append("\", \"memberExpiryDays\": \"").append(domain.getMemberExpiryDays())
.append("\", \"serviceExpiryDays\": \"").append(domain.getServiceExpiryDays())
.append("\", \"tokenExpiryMins\": \"").append(domain.getTokenExpiryMins())
.append("\", \"serviceCertExpiryMins\": \"").append(domain.getServiceCertExpiryMins())
.append("\", \"roleCertExpiryMins\": \"").append(domain.getRoleCertExpiryMins())
.append("\"}");
}
void auditLogRoleSystemMeta(StringBuilder auditDetails, Role role, String roleName) {
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"auditEnabled\": \"").append(role.getAuditEnabled())
.append("\"}");
}
void auditLogServiceIdentitySystemMeta(StringBuilder auditDetails, ServiceIdentity service, String serviceName) {
auditDetails.append("{\"name\": \"").append(serviceName)
.append("\", \"providerEndpoint\": \"").append(service.getProviderEndpoint())
.append("\"}");
}
void auditLogRoleMeta(StringBuilder auditDetails, Role role, String roleName) {
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"selfServe\": \"").append(role.getSelfServe())
.append("\", \"memberExpiryDays\": \"").append(role.getMemberExpiryDays())
.append("\", \"serviceExpiryDays\": \"").append(role.getServiceExpiryDays())
.append("\", \"tokenExpiryMins\": \"").append(role.getTokenExpiryMins())
.append("\", \"certExpiryMins\": \"").append(role.getCertExpiryMins())
.append("\", \"memberReviewDays\": \"").append(role.getMemberReviewDays())
.append("\", \"serviceReviewDays\": \"").append(role.getServiceReviewDays())
.append("\", \"reviewEnabled\": \"").append(role.getReviewEnabled())
.append("\", \"notifyRoles\": \"").append(role.getNotifyRoles())
.append("\"}");
}
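    /**
     * Insert or update the quota object for the given domain.
     **/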
void executePutQuota(ResourceContext ctx, String domainName, Quota quota,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// process our insert quota. since this is a "single"
// operation, we are not using any transactions.
if (con.getQuota(domainName) != null) {
con.updateQuota(domainName, quota);
} else {
con.insertQuota(domainName, quota);
}
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, null);
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteQuota(ResourceContext ctx, String domainName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// process our delete quota request - it's a single
// operation so no need to make it a transaction
if (!con.deleteQuota(domainName)) {
throw ZMSUtils.notFoundError(caller + ": unable to delete quota: " + domainName, caller);
}
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
public Quota getQuota(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return quotaCheck.getDomainQuota(con, domainName);
}
}
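    /**
     * Update the given system meta attribute on the role. The domain must have
     * its auditEnabled flag set before the attribute can be applied to the role.
     **/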
public void executePutRoleSystemMeta(ResourceContext ctx, String domainName, String roleName,
RoleSystemMeta meta, String attribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ROLE);
if (domain.getAuditEnabled() != Boolean.TRUE) {
throw ZMSUtils.requestError(caller + ": auditEnabled flag not set for domain: " + domainName + " to add it on the role: " + roleName, caller);
}
Role originalRole = getRole(con, domainName, roleName, false, false, false);
// now process the request. first we're going to make a
// copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName())
.setAuditEnabled(originalRole.getAuditEnabled())
.setTrust(originalRole.getTrust())
.setSelfServe(originalRole.getSelfServe())
.setMemberExpiryDays(originalRole.getMemberExpiryDays())
.setServiceExpiryDays(originalRole.getServiceExpiryDays())
.setTokenExpiryMins(originalRole.getTokenExpiryMins())
.setCertExpiryMins(originalRole.getCertExpiryMins())
.setMemberReviewDays(originalRole.getMemberReviewDays())
.setServiceReviewDays(originalRole.getServiceReviewDays())
.setSignAlgorithm(originalRole.getSignAlgorithm())
.setReviewEnabled(originalRole.getReviewEnabled())
.setNotifyRoles(originalRole.getNotifyRoles());
// then we're going to apply the updated fields
// from the given object
updateRoleSystemMetaFields(updatedRole, attribute, deleteAllowed, meta);
con.updateRole(domainName, updatedRole);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleSystemMeta(auditDetails, updatedRole, roleName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
public void executePutServiceIdentitySystemMeta(ResourceContext ctx, String domainName, String serviceName,
ServiceIdentitySystemMeta meta, String attribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// retrieve our original service identity object
ServiceIdentity serviceIdentity = getServiceIdentity(con, domainName, serviceName, false);
// then we're going to apply the updated fields
// from the given object
updateServiceIdentitySystemMetaFields(serviceIdentity, attribute, deleteAllowed, meta);
con.updateServiceIdentity(domainName, serviceIdentity);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogServiceIdentitySystemMeta(auditDetails, serviceIdentity, serviceName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void updateRoleMetaFields(Role role, RoleMeta meta) {
if (meta.getSelfServe() != null) {
role.setSelfServe(meta.getSelfServe());
}
if (meta.getMemberExpiryDays() != null) {
role.setMemberExpiryDays(meta.getMemberExpiryDays());
}
if (meta.getServiceExpiryDays() != null) {
role.setServiceExpiryDays(meta.getServiceExpiryDays());
}
if (meta.getTokenExpiryMins() != null) {
role.setTokenExpiryMins(meta.getTokenExpiryMins());
}
if (meta.getCertExpiryMins() != null) {
role.setCertExpiryMins(meta.getCertExpiryMins());
}
if (meta.getSignAlgorithm() != null) {
role.setSignAlgorithm(meta.getSignAlgorithm());
}
if (meta.getReviewEnabled() != null) {
role.setReviewEnabled(meta.getReviewEnabled());
}
if (meta.getNotifyRoles() != null) {
role.setNotifyRoles(meta.getNotifyRoles());
}
if (meta.getMemberReviewDays() != null) {
role.setMemberReviewDays(meta.getMemberReviewDays());
}
if (meta.getServiceReviewDays() != null) {
role.setServiceReviewDays(meta.getServiceReviewDays());
}
if (meta.getUserAuthorityFilter() != null) {
role.setUserAuthorityFilter(meta.getUserAuthorityFilter());
}
if (meta.getUserAuthorityExpiration() != null) {
role.setUserAuthorityExpiration(meta.getUserAuthorityExpiration());
}
}
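    /**
     * Update the role meta attributes and, if any expiry/review limits were
     * reduced or the user authority settings changed, update the affected
     * role members accordingly.
     **/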
public void executePutRoleMeta(ResourceContext ctx, String domainName, String roleName, RoleMeta meta,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, getPrincipalName(ctx));
// now process the request. first we're going to make a
// copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName())
.setAuditEnabled(originalRole.getAuditEnabled())
.setTrust(originalRole.getTrust())
.setSelfServe(originalRole.getSelfServe())
.setMemberExpiryDays(originalRole.getMemberExpiryDays())
.setServiceExpiryDays(originalRole.getServiceExpiryDays())
.setTokenExpiryMins(originalRole.getTokenExpiryMins())
.setCertExpiryMins(originalRole.getCertExpiryMins())
.setMemberReviewDays(originalRole.getMemberReviewDays())
.setServiceReviewDays(originalRole.getServiceReviewDays())
.setSignAlgorithm(originalRole.getSignAlgorithm())
.setReviewEnabled(originalRole.getReviewEnabled())
.setNotifyRoles(originalRole.getNotifyRoles())
.setUserAuthorityFilter(originalRole.getUserAuthorityFilter())
.setUserAuthorityExpiration(originalRole.getUserAuthorityExpiration());
// then we're going to apply the updated fields
// from the given object
updateRoleMetaFields(updatedRole, meta);
con.updateRole(domainName, updatedRole);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMeta(auditDetails, updatedRole, roleName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
// if the role member expiry date or review date has changed then we're going
// process all the members in the role and update the expiration and review
// date accordingly
updateRoleMembersDueDates(ctx, con, domainName, roleName, originalRole,
updatedRole, auditRef, caller);
// if there was a change in the role user attribute filter then we need
// to make the necessary changes as well.
updateRoleMembersSystemDisabledState(ctx, con, domainName, roleName, originalRole,
updatedRole, auditRef, caller);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
private boolean isEarlierDueDate(long newDueDateMillis, Timestamp currentDueDate) {
return newDueDateMillis != 0 && (currentDueDate == null || currentDueDate.millis() > newDueDateMillis);
}
boolean updateUserAuthorityFilter(RoleMember roleMember, final String userAuthorityFilter) {
// if all the attributes are set then we need to make sure
// the members disabled state does not have the authority
// filter bit set
int newState;
int currentState = roleMember.getSystemDisabled() == null ? 0 : roleMember.getSystemDisabled();
if (ZMSUtils.isUserAuthorityFilterValid(zmsConfig.getUserAuthority(), userAuthorityFilter, roleMember.getMemberName())) {
newState = currentState & ~ZMSConsts.ZMS_DISABLED_AUTHORITY_FILTER;
} else {
newState = currentState | ZMSConsts.ZMS_DISABLED_AUTHORITY_FILTER;
}
if (newState != currentState) {
roleMember.setSystemDisabled(newState);
return true;
}
return false;
}
boolean updateUserAuthorityExpiry(RoleMember roleMember, final String userAuthorityExpiry) {
Date authorityExpiry = zmsConfig.getUserAuthority().getDateAttribute(roleMember.getMemberName(), userAuthorityExpiry);
        // if we don't have a date then we'll expire the user right away
// otherwise we'll set the date as imposed by the user authority
boolean expiryDateUpdated = false;
Timestamp memberExpiry = roleMember.getExpiration();
if (authorityExpiry == null) {
// we'll update the expiration date to be the current time
            // if the user doesn't have one or it expires sometime
// in the future
            if (memberExpiry == null || memberExpiry.millis() > System.currentTimeMillis()) {
roleMember.setExpiration(Timestamp.fromCurrentTime());
expiryDateUpdated = true;
}
} else {
            // update the expiration date if it does not match the
            // value specified by the user authority
            if (memberExpiry == null || memberExpiry.millis() != authorityExpiry.getTime()) {
roleMember.setExpiration(Timestamp.fromDate(authorityExpiry));
expiryDateUpdated = true;
}
}
return expiryDateUpdated;
}
List<RoleMember> getRoleMembersWithUpdatedDisabledState(List<RoleMember> roleMembers, final String authorityFilter) {
List<RoleMember> roleMembersWithUpdatedDueDates = new ArrayList<>();
// if the authority filter is null or empty then we're going to go
// through all of the members and remove the system disabled bit
// set for user authority
boolean filterDisabled = authorityFilter == null || authorityFilter.isEmpty();
int newState;
for (RoleMember roleMember : roleMembers) {
int currentState = roleMember.getSystemDisabled() == null ? 0 : roleMember.getSystemDisabled();
// if the filter is disabled then we're going through the list and
// make sure the disabled bit for the filter is unset
if (filterDisabled) {
newState = currentState & ~ZMSConsts.ZMS_DISABLED_AUTHORITY_FILTER;
} else {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), zmsConfig.getUserDomainPrefix(),
zmsConfig.getAddlUserCheckDomainPrefixList());
// if we have a user then we'll check if the filter is still valid
// for the user. for services, all should be disabled as they should
// not be included in this role
if (bUser && ZMSUtils.isUserAuthorityFilterValid(zmsConfig.getUserAuthority(), authorityFilter, roleMember.getMemberName())) {
newState = currentState & ~ZMSConsts.ZMS_DISABLED_AUTHORITY_FILTER;
} else {
newState = currentState | ZMSConsts.ZMS_DISABLED_AUTHORITY_FILTER;
}
}
if (newState != currentState) {
roleMember.setSystemDisabled(newState);
roleMembersWithUpdatedDueDates.add(roleMember);
}
}
return roleMembersWithUpdatedDueDates;
}
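    /**
     * Return the list of role members whose expiration or review reminder dates
     * need to be updated based on the new role limits and the user authority
     * expiry attribute.
     **/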
List<RoleMember> getRoleMembersWithUpdatedDueDates(List<RoleMember> roleMembers, Timestamp userExpiration,
long userExpiryMillis, Timestamp serviceExpiration, long serviceExpiryMillis,
Timestamp userReview, long userReviewMillis, Timestamp serviceReview,
long serviceReviewMillis, final String userAuthorityExpiry) {
List<RoleMember> roleMembersWithUpdatedDueDates = new ArrayList<>();
for (RoleMember roleMember : roleMembers) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), zmsConfig.getUserDomainPrefix(),
zmsConfig.getAddlUserCheckDomainPrefixList());
Timestamp expiration = roleMember.getExpiration();
Timestamp reviewDate = roleMember.getReviewReminder();
boolean dueDateUpdated = false;
if (bUser) {
if (isEarlierDueDate(userExpiryMillis, expiration)) {
roleMember.setExpiration(userExpiration);
dueDateUpdated = true;
}
if (isEarlierDueDate(userReviewMillis, reviewDate)) {
roleMember.setReviewReminder(userReview);
dueDateUpdated = true;
}
// if we have a user filter and/or expiry configured we need
// to make sure that the user still satisfies the filter
// otherwise we'll just expire the user right away
if (userAuthorityExpiry != null && updateUserAuthorityExpiry(roleMember, userAuthorityExpiry)) {
dueDateUpdated = true;
}
} else {
if (isEarlierDueDate(serviceExpiryMillis, expiration)) {
roleMember.setExpiration(serviceExpiration);
dueDateUpdated = true;
}
if (isEarlierDueDate(serviceReviewMillis, reviewDate)) {
roleMember.setReviewReminder(serviceReview);
dueDateUpdated = true;
}
// as a final check if we're dealing with a service and we have
// either one of the user authority attributes set then we're
                // going to expire the service immediately since the role cannot
// contain any non-users
if (userAuthorityExpiry != null) {
Timestamp serviceExpiry = roleMember.getExpiration();
                    if (serviceExpiry == null || serviceExpiry.millis() > System.currentTimeMillis()) {
roleMember.setExpiration(Timestamp.fromCurrentTime());
dueDateUpdated = true;
}
}
}
if (dueDateUpdated) {
roleMembersWithUpdatedDueDates.add(roleMember);
}
}
return roleMembersWithUpdatedDueDates;
}
private boolean insertRoleMembers(ResourceContext ctx, ObjectStoreConnection con, List<RoleMember> roleMembers,
final String domainName, final String roleName, final String principal, final String auditRef,
final String caller) {
boolean bDataChanged = false;
for (RoleMember roleMember : roleMembers) {
try {
if (!con.insertRoleMember(domainName, roleName, roleMember, principal, auditRef)) {
LOG.error("unable to update member {}", roleMember.getMemberName());
continue;
}
} catch (Exception ex) {
LOG.error("unable to update member {} error: {}", roleMember.getMemberName(), ex.getMessage());
continue;
}
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName,
auditDetails.toString());
bDataChanged = true;
}
return bDataChanged;
}
boolean updateRoleMemberDisabledState(ResourceContext ctx, ObjectStoreConnection con, List<RoleMember> roleMembers,
final String domainName, final String roleName, final String principal, final String auditRef,
final String caller) {
boolean bDataChanged = false;
for (RoleMember roleMember : roleMembers) {
try {
if (!con.updateRoleMemberDisabledState(domainName, roleName, roleMember.getMemberName(), principal,
roleMember.getSystemDisabled(), auditRef)) {
LOG.error("unable to update member {}", roleMember.getMemberName());
continue;
}
} catch (Exception ex) {
LOG.error("unable to update member {} error: {}", roleMember.getMemberName(), ex.getMessage());
continue;
}
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName,
auditDetails.toString());
bDataChanged = true;
}
return bDataChanged;
}
boolean isUserAuthorityExpiryChanged(String originalValue, String newValue) {
// if we don't have a user authority defined then
// we assume there are no changes
if (zmsConfig.getUserAuthority() == null) {
return false;
}
// first let's make sure if we're given empty strings
// we treat them as nulls
if (originalValue != null && originalValue.isEmpty()) {
originalValue = null;
}
if (newValue != null && newValue.isEmpty()) {
newValue = null;
}
// we're only concerned if the value was either set or changed
// if the value was set and now was unset, it has no impact
// on the existing members so we're going to treat that as
// if the setting was not changed
if (newValue == null) {
return false;
} else {
return originalValue == null || !originalValue.equalsIgnoreCase(newValue);
}
}
boolean isUserAuthorityFilterChanged(String originalValue, String newValue) {
// if we don't have a user authority defined then
// we assume there are no changes
if (zmsConfig.getUserAuthority() == null) {
return false;
}
// first let's make sure if we're given empty strings
// we treat them as nulls
if (originalValue != null && originalValue.isEmpty()) {
originalValue = null;
}
if (newValue != null && newValue.isEmpty()) {
newValue = null;
}
if (newValue == null && originalValue == null) {
return false;
} else if (newValue == null || originalValue == null) {
return true;
} else {
return !originalValue.equalsIgnoreCase(newValue);
}
}
void updateRoleMembersSystemDisabledState(ResourceContext ctx, ObjectStoreConnection con, final String domainName,
final String roleName, Role originalRole, Role updatedRole, final String auditRef, final String caller) {
// if it's a delegated role then we have nothing to do
if (originalRole.getTrust() != null && !originalRole.getTrust().isEmpty()) {
return;
}
// if no role members, then there is nothing to do
final List<RoleMember> roleMembers = originalRole.getRoleMembers();
if (roleMembers == null || roleMembers.isEmpty()) {
return;
}
// check if the authority filter has changed otherwise we have
// nothing to do
if (!isUserAuthorityFilterChanged(originalRole.getUserAuthorityFilter(), updatedRole.getUserAuthorityFilter())) {
return;
}
final String principal = getPrincipalName(ctx);
// process our role members and if there were any changes processed then update
// our role and domain time-stamps, and invalidate local cache entry
List<RoleMember> roleMembersWithUpdatedDisabledState = getRoleMembersWithUpdatedDisabledState(roleMembers,
updatedRole.getUserAuthorityFilter());
if (updateRoleMemberDisabledState(ctx, con, roleMembersWithUpdatedDisabledState, domainName,
roleName, principal, auditRef, caller)) {
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
void updateRoleMembersDueDates(ResourceContext ctx, ObjectStoreConnection con, final String domainName,
final String roleName, Role originalRole, Role updatedRole, final String auditRef,
final String caller) {
// if it's a delegated role then we have nothing to do
if (originalRole.getTrust() != null && !originalRole.getTrust().isEmpty()) {
return;
}
// if no role members, then there is nothing to do
final List<RoleMember> roleMembers = originalRole.getRoleMembers();
if (roleMembers == null || roleMembers.isEmpty()) {
return;
}
// check if the user authority expiration attribute has been
// changed in which case we need to verify and update members
// accordingly
boolean userAuthorityExpiryChanged = isUserAuthorityExpiryChanged(originalRole.getUserAuthorityExpiration(),
updatedRole.getUserAuthorityExpiration());
// we only need to process the role members if the new due date
// is more restrictive than what we had before
boolean userMemberExpiryDayReduced = isNumOfDaysReduced(originalRole.getMemberExpiryDays(),
updatedRole.getMemberExpiryDays());
boolean serviceMemberExpiryDayReduced = isNumOfDaysReduced(originalRole.getServiceExpiryDays(),
updatedRole.getServiceExpiryDays());
boolean userMemberReviewDayReduced = isNumOfDaysReduced(originalRole.getMemberReviewDays(),
updatedRole.getMemberReviewDays());
boolean serviceMemberReviewDayReduced = isNumOfDaysReduced(originalRole.getServiceReviewDays(),
updatedRole.getServiceReviewDays());
if (!userMemberExpiryDayReduced && !serviceMemberExpiryDayReduced &&
!userMemberReviewDayReduced && !serviceMemberReviewDayReduced &&
!userAuthorityExpiryChanged) {
return;
}
// we're only going to process those role members whose
// due date is either not set or longer than the new limit
long userExpiryMillis = userMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getMemberExpiryDays(), TimeUnit.DAYS) : 0;
long serviceExpiryMillis = serviceMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getServiceExpiryDays(), TimeUnit.DAYS) : 0;
long userReviewMillis = userMemberReviewDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getMemberReviewDays(), TimeUnit.DAYS) : 0;
long serviceReviewMillis = serviceMemberReviewDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getServiceReviewDays(), TimeUnit.DAYS) : 0;
Timestamp userExpiration = Timestamp.fromMillis(userExpiryMillis);
Timestamp serviceExpiration = Timestamp.fromMillis(serviceExpiryMillis);
Timestamp userReview = Timestamp.fromMillis(userReviewMillis);
Timestamp serviceReview = Timestamp.fromMillis(serviceReviewMillis);
final String principal = getPrincipalName(ctx);
// process our role members and if there were any changes processed then update
// our role and domain time-stamps, and invalidate local cache entry
final String userAuthorityExpiry = userAuthorityExpiryChanged ? updatedRole.getUserAuthorityExpiration() : null;
List<RoleMember> roleMembersWithUpdatedDueDates = getRoleMembersWithUpdatedDueDates(roleMembers,
userExpiration, userExpiryMillis, serviceExpiration, serviceExpiryMillis,
userReview, userReviewMillis, serviceReview, serviceReviewMillis, userAuthorityExpiry);
if (insertRoleMembers(ctx, con, roleMembersWithUpdatedDueDates, domainName,
roleName, principal, auditRef, caller)) {
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
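    // returns true if the new number of days is set and is more restrictive than the old value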
boolean isNumOfDaysReduced(Integer oldNumberOfDays, Integer newNumberOfDays) {
if (newNumberOfDays == null || newNumberOfDays <= 0) {
return false;
}
if (oldNumberOfDays == null || oldNumberOfDays <= 0) {
return true;
}
return newNumberOfDays < oldNumberOfDays;
}
/**
* If the role has audit enabled, and user did not provide the auditRef,
* an exception will be thrown.
**/
void checkRoleAuditEnabled(ObjectStoreConnection con, Role role, final String auditRef,
final String caller, final String principal) {
if (role.getAuditEnabled() == Boolean.TRUE) {
if (auditRef == null || auditRef.length() == 0) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference required for role: " + role.getName(), caller);
}
if (auditReferenceValidator != null && !auditReferenceValidator.validateReference(auditRef, principal, caller)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference validation failed for role: " + role.getName() +
", auditRef: " + auditRef, caller);
}
}
}
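    /**
     * Apply the approve/reject decision for a pending role member and audit
     * log the request.
     **/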
void executePutMembershipDecision(ResourceContext ctx, String domainName, String roleName,
RoleMember roleMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
String principal = getPrincipalName(ctx);
// make sure the role auditing requirements are met
Role originalRole = con.getRole(domainName, roleName);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, principal);
// process our confirm role member support
if (!con.confirmRoleMember(domainName, roleName, roleMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": unable to apply role membership decision for member: " +
roleMember.getMemberName() + " and role: " + roleName, caller);
}
// update our domain time-stamp and save changes
con.updateRoleModTimestamp(domainName, roleName);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
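// Returns the pending role members reported by the object store for the
// given principal, grouped into per-domain member lists.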
DomainRoleMembership getPendingDomainRoleMembers(final String principal) {
DomainRoleMembership domainRoleMembership = new DomainRoleMembership();
List<DomainRoleMembers> domainRoleMembersList = new ArrayList<>();
DomainRoleMembers domainRoleMembers;
try (ObjectStoreConnection con = store.getConnection(true, false)) {
Map<String, List<DomainRoleMember>> domainRoleMembersMap = con.getPendingDomainRoleMembers(principal);
if (domainRoleMembersMap != null) {
for (String domain : domainRoleMembersMap.keySet()) {
domainRoleMembers = new DomainRoleMembers();
domainRoleMembers.setDomainName(domain);
domainRoleMembers.setMembers(domainRoleMembersMap.get(domain));
domainRoleMembersList.add(domainRoleMembers);
}
domainRoleMembership.setDomainRoleMembersList(domainRoleMembersList);
}
}
return domainRoleMembership;
}
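// Returns the set of roles whose approvers should be notified about pending
// membership requests. The notification timestamp is updated first, and the
// list is only returned when that update succeeds.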
public Set<String> getPendingMembershipApproverRoles() {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
long updateTs = System.currentTimeMillis();
if (con.updatePendingRoleMembersNotificationTimestamp(zmsConfig.getServerHostName(), updateTs)) {
return con.getPendingMembershipApproverRoles(zmsConfig.getServerHostName(), updateTs);
}
}
return null;
}
public Map<String, DomainRoleMember> getRoleExpiryMembers() {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
long updateTs = System.currentTimeMillis();
if (con.updateRoleMemberExpirationNotificationTimestamp(zmsConfig.getServerHostName(), updateTs)) {
return con.getNotifyTemporaryRoleMembers(zmsConfig.getServerHostName(), updateTs);
}
}
return null;
}
public Map<String, DomainRoleMember> getRoleReviewMembers() {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
long updateTs = System.currentTimeMillis();
if (con.updateRoleMemberReviewNotificationTimestamp(zmsConfig.getServerHostName(), updateTs)) {
return con.getNotifyReviewRoleMembers(zmsConfig.getServerHostName(), updateTs);
}
}
return null;
}
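// Auto-rejects pending role members that have been waiting longer than the
// configured lifespan: each expired entry is deleted and the rejection is
// recorded in the audit log.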
public void processExpiredPendingMembers(int pendingRoleMemberLifespan, final String monitorIdentity) {
final String auditRef = "Expired - auto reject";
final String caller = "processExpiredPendingMembers";
Map<String, List<DomainRoleMember>> memberList;
try (ObjectStoreConnection con = store.getConnection(true, false)) {
memberList = con.getExpiredPendingDomainRoleMembers(pendingRoleMemberLifespan);
}
// delete each member and record each expired member in audit log in a transaction
for (String domainName : memberList.keySet()) {
for (DomainRoleMember domainRoleMember : memberList.get(domainName)) {
final String principalName = domainRoleMember.getMemberName();
for (MemberRole memberRole : domainRoleMember.getMemberRoles()) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
if (con.deletePendingRoleMember(domainName, memberRole.getRoleName(),
principalName, monitorIdentity, auditRef)) {
auditLogRequest(monitorIdentity, domainName, auditRef, caller,
"REJECT", memberRole.getRoleName(),
"{\"member\": \"" + principalName + "\"}");
}
}
}
}
}
}
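/**
* Processes a role review: members flagged inactive in the incoming role are
* deleted, the remaining reviewed members are re-inserted with their updated
* due dates, and the role's review (and, when members changed, modification)
* timestamps are updated before the request is audit-logged. Delegated roles
* are rejected since the review must happen on the trusted role.
**/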
void executePutRoleReview(ResourceContext ctx, String domainName, String roleName, Role role,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
final String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole.getTrust() != null && !originalRole.getTrust().isEmpty()) {
throw ZMSUtils.requestError(caller + ": role " + roleName + " is delegated. Review should happen on the trusted role. ", caller);
}
// now process the request. first we're going to make a copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName());
// then we're going to apply the updated expiry and/or active status from the incoming role
List<RoleMember> noactionMembers = applyMembershipChanges(updatedRole, originalRole, role, auditRef);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
List<RoleMember> deletedMembers = new ArrayList<>();
List<RoleMember> extendedMembers = new ArrayList<>();
auditDetails.append("{\"name\": \"").append(roleName).append('\"')
.append(", \"selfServe\": ").append(originalRole.getSelfServe() == Boolean.TRUE ? "true" : "false")
.append(", \"auditEnabled\": ").append(originalRole.getAuditEnabled() == Boolean.TRUE ? "true" : "false");
for (RoleMember member : updatedRole.getRoleMembers()) {
// if active flag is coming as false for the member, that means it's flagged for deletion
if (member.getActive() == Boolean.FALSE) {
if (!con.deleteRoleMember(domainName, roleName, member.getMemberName(), principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role member: " +
member.getMemberName() + " from role: " + roleName, caller);
}
deletedMembers.add(member);
} else {
// if not marked for deletion, then we are going to extend the member
if (!con.insertRoleMember(domainName, roleName, member, principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to extend role member: " +
member.getMemberName() + " for the role: " + roleName, caller);
}
extendedMembers.add(member);
}
}
// construct audit log details
auditLogRoleMembers(auditDetails, "deleted-members", deletedMembers);
auditLogRoleMembers(auditDetails, "extended-members", extendedMembers);
auditLogRoleMembers(auditDetails, "no-action-members", noactionMembers);
auditDetails.append("}");
if (!deletedMembers.isEmpty() || !extendedMembers.isEmpty()) {
// we have one or more changes to the role. We should update both lastReviewed as well as modified timestamps
con.updateRoleModTimestamp(domainName, roleName);
con.updateRoleReviewTimestamp(domainName, roleName);
} else {
// since "no-action" is still a review, we are updating lastReviewed timestamp
con.updateRoleReviewTimestamp(domainName, roleName);
}
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, "REVIEW", roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
/**
* This method takes the input role, creates a map using memberName as key,
* copies members from the original role in the DB and only adds deleted / extended members to the updatedRole.
* @param updatedRole updated role to be sent to DB to record changes
* @param originalRole original role from DB
* @param role incoming role containing changes from domain admin
* @param auditRef audit ref for the change
* @return List of role members for which no action was taken
*/
List<RoleMember> applyMembershipChanges(Role updatedRole, Role originalRole, Role role, String auditRef) {
Map<String, RoleMember> incomingMemberMap =
role.getRoleMembers().stream().collect(Collectors.toMap(RoleMember::getMemberName, item -> item));
List<RoleMember> noActionMembers = new ArrayList<>(originalRole.getRoleMembers().size());
// updatedMembers size is driven by input
List<RoleMember> updatedMembers = new ArrayList<>(incomingMemberMap.size());
updatedRole.setRoleMembers(updatedMembers);
RoleMember updatedMember;
// if original role is auditEnabled then all the extensions should be sent for approval again.
boolean approvalStatus = originalRole.getAuditEnabled() != Boolean.TRUE;
RoleMember tempMemberFromMap;
for (RoleMember originalMember : originalRole.getRoleMembers()) {
// we are only going to update the changed members
if (incomingMemberMap.containsKey(originalMember.getMemberName())) {
updatedMember = new RoleMember();
updatedMember.setMemberName(originalMember.getMemberName());
tempMemberFromMap = incomingMemberMap.get(updatedMember.getMemberName());
// member's approval status is determined by auditEnabled flag set on original role
updatedMember.setApproved(approvalStatus);
// member's active status is determined by action taken in UI
updatedMember.setActive(tempMemberFromMap.getActive());
// member's new expiration is set by role / domain level expiration setting
updatedMember.setExpiration(tempMemberFromMap.getExpiration());
updatedMember.setAuditRef(auditRef);
updatedMembers.add(updatedMember);
} else {
noActionMembers.add(originalMember);
}
}
return noActionMembers;
}
void updateDomainModTimestamp(final String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// update domain time-stamps, and invalidate local cache entry
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
List<TemplateMetaData> getDomainTemplates(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getDomainTemplates(domainName);
}
}
void processUserAuthorityRestrictions() {
// if we don't have a user authority defined then there
// is no work to be done
if (zmsConfig.getUserAuthority() == null) {
return;
}
// first we need to get all the roles that have the authority
// filter or date expiry attributes set
List<MemberRole> roles;
try (ObjectStoreConnection con = store.getConnection(true, false)) {
roles = con.listRolesWithUserAuthorityRestrictions();
}
if (roles == null) {
return;
}
// for each role, catch any exception and ignore it since we
// want to process all roles and not allow a single one
// to prevent updating the others
for (MemberRole role : roles) {
try {
enforceRoleUserAuthorityRestrictions(role.getDomainName(), role.getRoleName());
} catch (Exception ex) {
LOG.error("Unable to process user authority restrictions for {}:role.{} - {}",
role.getDomainName(), role.getRoleName(), ex.getMessage());
}
}
}
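// Re-applies the given templates to every domain currently using them: the
// store is queried for the domains associated with the template names, and
// executePutDomainTemplate is invoked per domain with its template list.
// Failures are logged and do not stop processing of the remaining domains.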
Map<String, List<String>> applyTemplatesForListOfDomains(Map<String, Integer> templateDetails) {
final String caller = "applyTemplatesForListOfDomains";
final String auditRef = "AutoApplyTemplate";
Map<String, List<String>> domainTemplateListMap;
DomainTemplate domainTemplate = new DomainTemplate();
try (ObjectStoreConnection con = store.getConnection(true, false)) {
domainTemplateListMap = con.getDomainFromTemplateName(templateDetails);
}
for (String domainName : domainTemplateListMap.keySet()) {
domainTemplate.setTemplateNames(domainTemplateListMap.get(domainName));
// Passing a null context since this is an internal call during app start-up.
// executePutDomainTemplate can bulk-apply templates to a domain, hence we send the domainName and the template list.
try {
this.executePutDomainTemplate(null, domainName, domainTemplate, auditRef, caller);
} catch (Exception ex) {
LOG.error("unable to apply template for domain {} and template {} error: {}", domainName, domainTemplate, ex.getMessage());
continue;
}
}
return domainTemplateListMap;
}
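// Enforces the role's user authority expiration and filter settings on its
// current members, updating member due dates and disabled state as needed,
// and bumping the role/domain timestamps when anything changed.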
void enforceRoleUserAuthorityRestrictions(final String domainName, final String roleName) {
final String caller = "enforceRoleUserAuthorityRestrictions";
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// get the role from the storage system
Role role = getRole(con, domainName, roleName, false, false, false);
if (role == null) {
return;
}
// update the role membership
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers == null) {
return;
}
// first process the authority expiration restriction
boolean expiryDBUpdated = false;
final String userAuthorityExpiry = role.getUserAuthorityExpiration();
if (userAuthorityExpiry != null) {
List<RoleMember> updatedMembers = new ArrayList<>();
for (RoleMember roleMember : roleMembers) {
if (updateUserAuthorityExpiry(roleMember, userAuthorityExpiry)) {
updatedMembers.add(roleMember);
}
}
expiryDBUpdated = insertRoleMembers(null, con, updatedMembers, domainName, roleName,
ZMSConsts.SYS_AUTH_MONITOR, AUDIT_REF, caller);
}
// now process authority filter restriction
boolean filterDBUpdated = false;
final String userAuthorityFilter = role.getUserAuthorityFilter();
if (userAuthorityFilter != null) {
List<RoleMember> updatedMembers = new ArrayList<>();
for (RoleMember roleMember : roleMembers) {
if (updateUserAuthorityFilter(roleMember, userAuthorityFilter)) {
updatedMembers.add(roleMember);
}
}
filterDBUpdated = updateRoleMemberDisabledState(null, con, updatedMembers, domainName,
roleName, ZMSConsts.SYS_AUTH_MONITOR, AUDIT_REF, caller);
}
if (expiryDBUpdated || filterDBUpdated) {
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
}
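// Runnable that enforces user authority expiration and filter restrictions
// across all roles that have them configured; exceptions are logged rather
// than propagated.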
class UserAuthorityFilterEnforcer implements Runnable {
public UserAuthorityFilterEnforcer() {
}
@Override
public void run() {
LOG.info("UserAuthorityFilterEnforcer: Starting user authority filter enforcer thread...");
try {
processUserAuthorityRestrictions();
} catch (Throwable t) {
LOG.error("UserAuthorityFilterEnforcer: unable to enforce user authority restrictions: {}",
t.getMessage());
}
LOG.info("UserAuthorityFilterEnforcer: Completed user authority filter enforcer thread");
}
}
}
| 1 | 5,163 | we cannot change the behavior of the processRole - that method is used in lots of places. this call must be done only in the method where templates are being handled. | AthenZ-athenz | java |
@@ -63,7 +63,9 @@ const (
// ECSAgentExecConfigDir is the directory where ECS Agent will write the ExecAgent config files to
ECSAgentExecConfigDir = ecsAgentExecDepsDir + "/" + ContainerConfigDirName
// HostExecConfigDir is the dir where ExecAgents Config files will live
- HostExecConfigDir = hostExecDepsDir + "/" + ContainerConfigDirName
+ HostExecConfigDir = hostExecDepsDir + "/" + ContainerConfigDirName
+ ExecAgentLogConfigFileName = "seelog.xml"
+ ContainerLogConfigFile = "configuration/" + ExecAgentLogConfigFileName
)
var ( | 1 | // +build linux
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package execcmd
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/pborman/uuid"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
)
const (
namelessContainerPrefix = "nameless-container-"
ecsAgentExecDepsDir = "/managed-agents/execute-command"
// ecsAgentDepsBinDir is the directory where ECS Agent will read versions of SSM agent
ecsAgentDepsBinDir = ecsAgentExecDepsDir + "/bin"
ContainerDepsDirPrefix = "/ecs-execute-command-"
// filePerm is the permission for the exec agent config file.
filePerm = 0644
defaultSessionLimit = 2
SSMAgentBinName = "amazon-ssm-agent"
SSMAgentWorkerBinName = "ssm-agent-worker"
SessionWorkerBinName = "ssm-session-worker"
HostLogDir = "/var/log/ecs/exec"
ContainerLogDir = "/var/log/amazon/ssm"
ECSAgentExecLogDir = "/log/exec"
HostCertFile = "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"
ContainerCertFileSuffix = "certs/amazon-ssm-agent.crt"
containerConfigFileName = "amazon-ssm-agent.json"
ContainerConfigDirName = "config"
ContainerConfigFileSuffix = "configuration/" + containerConfigFileName
// ECSAgentExecConfigDir is the directory where ECS Agent will write the ExecAgent config files to
ECSAgentExecConfigDir = ecsAgentExecDepsDir + "/" + ContainerConfigDirName
// HostExecConfigDir is the dir where ExecAgents Config files will live
HostExecConfigDir = hostExecDepsDir + "/" + ContainerConfigDirName
)
var (
execAgentConfigTemplate = `{
"Mgs": {
"Region": "",
"Endpoint": "",
"StopTimeoutMillis": 20000,
"SessionWorkersLimit": %d
},
"Agent": {
"Region": "",
"OrchestrationRootDir": "",
"ContainerMode": true
}
}`
// TODO: [ecs-exec] seelog config needs to be implemented following a similar approach to ssm config
execAgentConfigFileNameTemplate = `amazon-ssm-agent-%s.json`
errExecCommandManagedAgentNotFound = fmt.Errorf("managed agent not found (%s)", ExecuteCommandAgentName)
)
// InitializeContainer adds the necessary bind mounts in order for the ExecCommandAgent to run properly in the container
// TODO: [ecs-exec] Should we validate the ssm agent binaries & certs are valid and fail here if they're not? (bind mount will succeed even if files don't exist in host)
func (m *manager) InitializeContainer(taskId string, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig) error {
ma, ok := container.GetManagedAgentByName(ExecuteCommandAgentName)
if !ok {
return errExecCommandManagedAgentNotFound
}
configFile, err := GetExecAgentConfigFileName(getSessionWorkersLimit(ma))
if err != nil {
return fmt.Errorf("could not generate ExecAgent Config File: %v", err)
}
uuid := newUUID()
containerDepsFolder := ContainerDepsDirPrefix + uuid
latestBinVersionDir, err := m.getLatestVersionedHostBinDir()
if err != nil {
return err
}
// Add ssm binary mounts
hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping(
filepath.Join(latestBinVersionDir, SSMAgentBinName),
filepath.Join(containerDepsFolder, SSMAgentBinName)))
hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping(
filepath.Join(latestBinVersionDir, SSMAgentWorkerBinName),
filepath.Join(containerDepsFolder, SSMAgentWorkerBinName)))
hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping(
filepath.Join(latestBinVersionDir, SessionWorkerBinName),
filepath.Join(containerDepsFolder, SessionWorkerBinName)))
// Add ssm agent config file mount
hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping(
filepath.Join(HostExecConfigDir, configFile),
filepath.Join(containerDepsFolder, ContainerConfigFileSuffix)))
// Append TLS cert mount
hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping(
HostCertFile, // TODO: [ecs-exec] decision pending - review the location of the certs in the host
filepath.Join(containerDepsFolder, ContainerCertFileSuffix)))
// Add ssm log bind mount
cn := fileSystemSafeContainerName(container)
hostConfig.Binds = append(hostConfig.Binds, getBindMountMapping(
filepath.Join(HostLogDir, taskId, cn),
ContainerLogDir))
container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{
ID: uuid,
})
return nil
}
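// getLatestVersionedHostBinDir returns the host bin directory of the newest
// SSM agent version that ships all required binaries (agent, agent worker and
// session worker), skipping versions with missing files.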
func (m *manager) getLatestVersionedHostBinDir() (string, error) {
versions, err := retrieveAgentVersions(ecsAgentDepsBinDir)
if err != nil {
return "", err
}
sort.Sort(sort.Reverse(byAgentVersion(versions)))
var latest string
for _, v := range versions {
vStr := v.String()
ecsAgentDepsVersionedBinDir := filepath.Join(ecsAgentDepsBinDir, vStr)
if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SSMAgentBinName)) {
continue // try falling back to the previous version
}
// TODO: [ecs-exec] This requirement will be removed for SSM agent V2
if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SSMAgentWorkerBinName)) {
continue // try falling back to the previous version
}
if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SessionWorkerBinName)) {
continue // try falling back to the previous version
}
latest = filepath.Join(m.hostBinDir, vStr)
break
}
if latest == "" {
return "", fmt.Errorf("no valid versions were found in %s", m.hostBinDir)
}
return latest, nil
}
func getReadOnlyBindMountMapping(hostDir, containerDir string) string {
return getBindMountMapping(hostDir, containerDir) + ":ro"
}
func getBindMountMapping(hostDir, containerDir string) string {
return hostDir + ":" + containerDir
}
var newUUID = uuid.New
func fileSystemSafeContainerName(c *apicontainer.Container) string {
// Trim leading hyphens since they're not valid directory names
cn := strings.TrimLeft(c.Name, "-")
if cn == "" {
// Fallback name in the extreme case that we end up with an empty string after trimming all leading hyphens.
return namelessContainerPrefix + newUUID()
}
return cn
}
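// getSessionWorkersLimit extracts the SessionWorkersLimit property sent by
// ACS for the managed agent, falling back to the default when the property is
// missing or not a positive integer.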
func getSessionWorkersLimit(ma apicontainer.ManagedAgent) int {
// TODO [ecs-exec] : verify that returning the default session limit (2) is ok in case of any errors, misconfiguration
limit := defaultSessionLimit
if ma.Properties == nil { // This means ACS didn't send the limit
return limit
}
limitStr, ok := ma.Properties["SessionWorkersLimit"]
if !ok { // This also means ACS didn't send the limit
return limit
}
limit, err := strconv.Atoi(limitStr)
if err != nil { // This means ACS sent a limit that can't be converted to an int
return defaultSessionLimit
}
if limit <= 0 {
limit = defaultSessionLimit
}
return limit
}
var GetExecAgentConfigFileName = getAgentConfigFileName
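// getAgentConfigFileName renders the exec agent config for the given session
// limit, derives the file name from a hash of the config contents, and writes
// the file to the ECS agent exec config directory if it is not already there.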
func getAgentConfigFileName(sessionLimit int) (string, error) {
config := fmt.Sprintf(execAgentConfigTemplate, sessionLimit)
hash := getExecAgentConfigHash(config)
configFileName := fmt.Sprintf(execAgentConfigFileNameTemplate, hash)
// check if config file exists already
configFilePath := filepath.Join(ECSAgentExecConfigDir, configFileName)
if fileExists(configFilePath) {
// TODO: verify the hash of the existing file contents
return configFileName, nil
}
// config doesn't exist; create a new one
if err := createNewExecAgentConfigFile(config, configFilePath); err != nil {
return "", err
}
return configFileName, nil
}
func getExecAgentConfigHash(config string) string {
hash := sha256.New()
hash.Write([]byte(config))
return base64.URLEncoding.EncodeToString(hash.Sum(nil))
}
var osStat = os.Stat
func fileExists(path string) bool {
if fi, err := osStat(path); err == nil {
return !fi.IsDir()
}
return false
}
var createNewExecAgentConfigFile = createNewConfigFile
func createNewConfigFile(config, configFilePath string) error {
return ioutil.WriteFile(configFilePath, []byte(config), filePerm)
}
| 1 | 25,568 | I think we probably want to follow the same naming convention that we do with `execAgentConfigFileNameTemplate` (using the SHA in the name of the file). This file might be confgurable in the future and when that happens we will be ready. Plus it's pretty much the same code that we already use for the config file. | aws-amazon-ecs-agent | go |
@@ -380,6 +380,14 @@ static void send_resource_update(struct link *manager)
total_resources->disk.total = MAX(0, local_resources->disk.total);
total_resources->disk.largest = MAX(0, local_resources->disk.largest);
total_resources->disk.smallest = MAX(0, local_resources->disk.smallest);
+
+ //if workers are set to expire at some time, send the amount of time left to manager
+ if(manual_wall_time_option != 0) {
+ total_resources->time_left = worker_start_time + manual_wall_time_option - time(0);
+ }
+ else {
+ total_resources->time_left = -1;
+ }
}
work_queue_resources_send(manager,total_resources,stoptime); | 1 | /*
Copyright (C) 2008- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include "work_queue.h"
#include "work_queue_protocol.h"
#include "work_queue_internal.h"
#include "work_queue_resources.h"
#include "work_queue_process.h"
#include "work_queue_catalog.h"
#include "work_queue_watcher.h"
#include "work_queue_gpus.h"
#include "cctools.h"
#include "macros.h"
#include "catalog_query.h"
#include "domain_name_cache.h"
#include "jx.h"
#include "jx_eval.h"
#include "jx_parse.h"
#include "jx_print.h"
#include "copy_stream.h"
#include "host_memory_info.h"
#include "host_disk_info.h"
#include "path_disk_size_info.h"
#include "hash_cache.h"
#include "link.h"
#include "link_auth.h"
#include "list.h"
#include "xxmalloc.h"
#include "debug.h"
#include "stringtools.h"
#include "path.h"
#include "load_average.h"
#include "getopt.h"
#include "getopt_aux.h"
#include "create_dir.h"
#include "delete_dir.h"
#include "itable.h"
#include "random.h"
#include "url_encode.h"
#include "md5.h"
#include "disk_alloc.h"
#include "hash_table.h"
#include "pattern.h"
#include "gpu_info.h"
#include "tlq_config.h"
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <poll.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>
typedef enum {
WORKER_MODE_WORKER,
WORKER_MODE_FOREMAN
} worker_mode_t;
typedef enum {
CONTAINER_MODE_NONE,
CONTAINER_MODE_DOCKER,
CONTAINER_MODE_DOCKER_PRESERVE,
CONTAINER_MODE_UMBRELLA
} container_mode_t;
#define DOCKER_WORK_DIR "/home/worker"
// In single shot mode, immediately quit when disconnected.
// Useful for accelerating the test suite.
static int single_shot_mode = 0;
// Maximum time to stay connected to a single manager without any work.
static int idle_timeout = 900;
// Current time at which we will give up if no work is received.
static time_t idle_stoptime = 0;
// Current time at which we will give up if no manager is found.
static time_t connect_stoptime = 0;
// Maximum time to attempt connecting to all available managers before giving up.
static int connect_timeout = 900;
// Maximum time to attempt sending/receiving any given file or message.
static const int active_timeout = 3600;
// Maximum time for the foreman to spend waiting in its internal loop
static const int foreman_internal_timeout = 5;
// Initial value for backoff interval (in seconds) when worker fails to connect to a manager.
static int init_backoff_interval = 1;
// Maximum value for backoff interval (in seconds) when worker fails to connect to a manager.
static int max_backoff_interval = 60;
// Chance that a worker will decide to shut down each minute without warning, to simulate failure.
static double worker_volatility = 0.0;
// If flag is set, then the worker proceeds to immediately cleanup and shut down.
// This can be set by Ctrl-C or by any condition that prevents further progress.
static int abort_flag = 0;
// Record the signal received, to inform the manager if appropiate.
static int abort_signal_received = 0;
// Flag used to indicate a child must be waited for.
static int sigchld_received_flag = 0;
// Password shared between manager and worker.
char *password = 0;
// Allow worker to use symlinks when link() fails. Enabled by default.
static int symlinks_enabled = 1;
// Worker id. A unique id for this worker instance.
static char *worker_id;
// pid of the worker's parent process. If different from zero, worker will be
// terminated when its parent process changes.
static pid_t initial_ppid = 0;
static worker_mode_t worker_mode = WORKER_MODE_WORKER;
static container_mode_t container_mode = CONTAINER_MODE_NONE;
static int load_from_tar = 0;
struct manager_address {
char host[DOMAIN_NAME_MAX];
int port;
char addr[DOMAIN_NAME_MAX];
};
struct list *manager_addresses;
struct manager_address *current_manager_address;
static char *workspace;
static char *os_name = NULL;
static char *arch_name = NULL;
static char *user_specified_workdir = NULL;
static time_t worker_start_time = 0;
static struct work_queue_watcher * watcher = 0;
static struct work_queue_resources * local_resources = 0;
struct work_queue_resources * total_resources = 0;
struct work_queue_resources * total_resources_last = 0;
static int64_t last_task_received = 0;
static int64_t manual_cores_option = 0;
static int64_t manual_disk_option = 0;
static int64_t manual_memory_option = 0;
static int64_t manual_gpus_option = 0;
static time_t manual_wall_time_option = 0;
static int64_t cores_allocated = 0;
static int64_t memory_allocated = 0;
static int64_t disk_allocated = 0;
static int64_t gpus_allocated = 0;
// Allow worker to use disk_alloc loop devices for task sandbox. Disabled by default.
static int disk_allocation = 0;
static int64_t files_counted = 0;
static int check_resources_interval = 5;
static int max_time_on_measurement = 3;
static struct work_queue *foreman_q = NULL;
// docker image name
static char *img_name = NULL;
static char *container_name = NULL;
static char *tar_fn = NULL;
// Table of all processes in any state, indexed by taskid.
// Processes should be created/deleted when added/removed from this table.
static struct itable *procs_table = NULL;
// Table of all processes currently running, indexed by pid.
// These are additional pointers into procs_table.
static struct itable *procs_running = NULL;
// List of all procs that are waiting to be run.
// These are additional pointers into procs_table.
static struct list *procs_waiting = NULL;
// Table of all processes with results to be sent back, indexed by taskid.
// These are additional pointers into procs_table.
static struct itable *procs_complete = NULL;
//User specified features this worker provides.
static struct hash_table *features = NULL;
static int results_to_be_sent_msg = 0;
static timestamp_t total_task_execution_time = 0;
static int total_tasks_executed = 0;
static const char *project_regex = 0;
static int released_by_manager = 0;
static char *tlq_url = NULL;
static char *debug_path = NULL;
static char *catalog_hosts = NULL;
static int tlq_port = 0;
__attribute__ (( format(printf,2,3) ))
static void send_manager_message( struct link *manager, const char *fmt, ... )
{
char debug_msg[2*WORK_QUEUE_LINE_MAX];
va_list va;
va_list debug_va;
va_start(va,fmt);
string_nformat(debug_msg, sizeof(debug_msg), "tx to manager: %s", fmt);
va_copy(debug_va, va);
vdebug(D_WQ, debug_msg, debug_va);
link_putvfstring(manager, fmt, time(0)+active_timeout, va);
va_end(va);
}
static int recv_manager_message( struct link *manager, char *line, int length, time_t stoptime )
{
int result = link_readline(manager,line,length,stoptime);
if(result) debug(D_WQ,"rx from manager: %s",line);
return result;
}
/*
We track how much time has elapsed since the manager assigned a task.
If time(0) > idle_stoptime, then the worker will disconnect.
*/
void reset_idle_timer()
{
idle_stoptime = time(0) + idle_timeout;
}
/*
Measure the disk used by the worker. We only manually measure the cache directory, as processes measure themselves.
*/
int64_t measure_worker_disk() {
static struct path_disk_size_info *state = NULL;
path_disk_size_info_get_r("./cache", max_time_on_measurement, &state);
int64_t disk_measured = 0;
if(state->last_byte_size_complete >= 0) {
disk_measured = (int64_t) ceil(state->last_byte_size_complete/(1.0*MEGA));
}
files_counted = state->last_file_count_complete;
if(state->complete_measurement) {
/* if a complete measurement has been done, then update
* for the found value, and add the known values of the processes. */
struct work_queue_process *p;
uint64_t taskid;
itable_firstkey(procs_table);
while(itable_nextkey(procs_table,&taskid,(void**)&p)) {
if(p->sandbox_size > 0) {
disk_measured += p->sandbox_size;
files_counted += p->sandbox_file_count;
}
}
}
return disk_measured;
}
/*
Measure only the resources associated with this particular node
and apply any operations that override.
*/
void measure_worker_resources()
{
static time_t last_resources_measurement = 0;
if(time(0) < last_resources_measurement + check_resources_interval) {
return;
}
struct work_queue_resources *r = local_resources;
work_queue_resources_measure_locally(r,workspace);
if(worker_mode == WORKER_MODE_FOREMAN) {
aggregate_workers_resources(foreman_q, total_resources, features);
} else {
if(manual_cores_option > 0)
r->cores.total = manual_cores_option;
if(manual_memory_option)
r->memory.total = manual_memory_option;
if(manual_gpus_option)
r->gpus.total = manual_gpus_option;
}
if(manual_disk_option)
r->disk.total = MIN(r->disk.total, manual_disk_option);
r->cores.smallest = r->cores.largest = r->cores.total;
r->memory.smallest = r->memory.largest = r->memory.total;
r->disk.smallest = r->disk.largest = r->disk.total;
r->gpus.smallest = r->gpus.largest = r->gpus.total;
r->disk.inuse = measure_worker_disk();
r->tag = last_task_received;
if(worker_mode == WORKER_MODE_FOREMAN) {
total_resources->disk.total = r->disk.total;
total_resources->disk.inuse = r->disk.inuse;
total_resources->tag = last_task_received;
} else {
/* in a regular worker, total and local resources are the same. */
memcpy(total_resources, r, sizeof(struct work_queue_resources));
}
work_queue_gpus_init(r->gpus.total);
last_resources_measurement = time(0);
}
/*
Send a message to the manager with user defined features.
*/
static void send_features(struct link *manager) {
char *f;
void *dummy;
hash_table_firstkey(features);
char fenc[WORK_QUEUE_LINE_MAX];
while(hash_table_nextkey(features, &f, &dummy)) {
url_encode(f, fenc, WORK_QUEUE_LINE_MAX);
send_manager_message(manager, "feature %s\n", fenc);
}
}
/*
Send a message to the manager with my current resources.
*/
static void send_resource_update(struct link *manager)
{
time_t stoptime = time(0) + active_timeout;
if(worker_mode == WORKER_MODE_FOREMAN) {
total_resources->disk.total = local_resources->disk.total;
total_resources->disk.inuse = local_resources->disk.inuse;
} else {
total_resources->memory.total = MAX(0, local_resources->memory.total);
total_resources->memory.largest = MAX(0, local_resources->memory.largest);
total_resources->memory.smallest = MAX(0, local_resources->memory.smallest);
total_resources->disk.total = MAX(0, local_resources->disk.total);
total_resources->disk.largest = MAX(0, local_resources->disk.largest);
total_resources->disk.smallest = MAX(0, local_resources->disk.smallest);
}
work_queue_resources_send(manager,total_resources,stoptime);
send_manager_message(manager, "info end_of_resource_update %d\n", 0);
}
/*
Send a message to the manager with my current statistics information.
*/
static void send_stats_update(struct link *manager)
{
if(worker_mode == WORKER_MODE_FOREMAN) {
struct work_queue_stats s;
work_queue_get_stats_hierarchy(foreman_q, &s);
send_manager_message(manager, "info workers_joined %lld\n", (long long) s.workers_joined);
send_manager_message(manager, "info workers_removed %lld\n", (long long) s.workers_removed);
send_manager_message(manager, "info workers_released %lld\n", (long long) s.workers_released);
send_manager_message(manager, "info workers_idled_out %lld\n", (long long) s.workers_idled_out);
send_manager_message(manager, "info workers_fast_aborted %lld\n", (long long) s.workers_fast_aborted);
send_manager_message(manager, "info workers_blacklisted %lld\n", (long long) s.workers_blacklisted);
send_manager_message(manager, "info workers_lost %lld\n", (long long) s.workers_lost);
send_manager_message(manager, "info tasks_waiting %lld\n", (long long) s.tasks_waiting);
send_manager_message(manager, "info tasks_on_workers %lld\n", (long long) s.tasks_on_workers);
send_manager_message(manager, "info tasks_running %lld\n", (long long) s.tasks_running);
send_manager_message(manager, "info tasks_waiting %lld\n", (long long) list_size(procs_waiting));
send_manager_message(manager, "info tasks_with_results %lld\n", (long long) s.tasks_with_results);
send_manager_message(manager, "info time_send %lld\n", (long long) s.time_send);
send_manager_message(manager, "info time_receive %lld\n", (long long) s.time_receive);
send_manager_message(manager, "info time_send_good %lld\n", (long long) s.time_send_good);
send_manager_message(manager, "info time_receive_good %lld\n", (long long) s.time_receive_good);
send_manager_message(manager, "info time_workers_execute %lld\n", (long long) s.time_workers_execute);
send_manager_message(manager, "info time_workers_execute_good %lld\n", (long long) s.time_workers_execute_good);
send_manager_message(manager, "info time_workers_execute_exhaustion %lld\n", (long long) s.time_workers_execute_exhaustion);
send_manager_message(manager, "info bytes_sent %lld\n", (long long) s.bytes_sent);
send_manager_message(manager, "info bytes_received %lld\n", (long long) s.bytes_received);
}
else {
send_manager_message(manager, "info tasks_running %lld\n", (long long) itable_size(procs_running));
}
}
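/*
Send a keepalive message to the manager, including a resource update when
forced or when running as a foreman, followed by a statistics update.
*/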
static int send_keepalive(struct link *manager, int force_resources){
send_manager_message(manager, "alive\n");
/* for regular workers we only send resources on special occasions, thus
* the force_resources. */
if(force_resources || worker_mode == WORKER_MODE_FOREMAN) {
send_resource_update(manager);
}
send_stats_update(manager);
return 1;
}
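/*
Look up this worker's TLQ URL from the local TLQ server, if TLQ debugging
is configured, and report it to the manager.
*/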
static int send_tlq_config( struct link *manager ) {
//attempt to find local TLQ server to retrieve manager URL
if(tlq_port && debug_path && !tlq_url) {
debug(D_TLQ, "looking up worker TLQ URL");
time_t config_stoptime = time(0) + 10;
tlq_url = tlq_config_url(tlq_port, debug_path, config_stoptime);
if(tlq_url) debug(D_TLQ, "set worker TLQ URL: %s", tlq_url);
else debug(D_TLQ, "error setting worker TLQ URL");
}
else if(tlq_port && !debug_path && !tlq_url) debug(D_TLQ, "cannot get worker TLQ URL: no debug log path set");
if(tlq_url) send_manager_message(manager, "tlq %s\n", tlq_url);
return 1;
}
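/*
Look up the TLQ URL for a completed task's debug log, assuming the task
command was wrapped by the TLQ log_define script, and record it in the
local debug log.
*/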
static int get_task_tlq_url( struct work_queue_task *task ) {
if(tlq_port && debug_path) {
char home_host[WORK_QUEUE_LINE_MAX];
char tlq_workdir[WORK_QUEUE_LINE_MAX];
char log_path[WORK_QUEUE_LINE_MAX];
int home_port;
debug(D_TLQ, "looking up task %d TLQ URL", task->taskid);
//Command is assumed to be wrapped by log_define script from TLQ
if(sscanf(task->command_line,"sh log_define %s %d %s %s", home_host, &home_port, tlq_workdir, log_path) == 4) {
time_t config_stoptime = time(0) + 10;
char *task_url = tlq_config_url(tlq_port, log_path, config_stoptime);
if(!task_url) {
debug(D_TLQ, "error setting task %d TLQ URL", task->taskid);
return 0;
}
debug(D_TLQ, "set task %d TLQ URL: %s", task->taskid, task_url);
return 1;
}
else {
debug(D_TLQ, "could not find task %d debug log", task->taskid);
return 0;
}
return 1;
}
else return 0;
}
/*
Send the initial "ready" message to the manager with the version and so forth.
The manager will not start sending tasks until this message is received.
*/
static void report_worker_ready( struct link *manager )
{
char hostname[DOMAIN_NAME_MAX];
domain_name_cache_guess(hostname);
send_manager_message(manager,"workqueue %d %s %s %s %d.%d.%d\n",WORK_QUEUE_PROTOCOL_VERSION,hostname,os_name,arch_name,CCTOOLS_VERSION_MAJOR,CCTOOLS_VERSION_MINOR,CCTOOLS_VERSION_MICRO);
send_manager_message(manager, "info worker-id %s\n", worker_id);
send_features(manager);
send_tlq_config(manager);
send_keepalive(manager, 1);
}
const char *skip_dotslash( const char *s )
{
while(!strncmp(s,"./",2)) s+=2;
return s;
}
/*
Link a file from one place to another.
If a hard link doesn't work, use a symbolic link.
If it is a directory, do it recursively.
*/
int link_recursive( const char *source, const char *target )
{
struct stat info;
if(lstat(source,&info)<0) return 0;
if(S_ISDIR(info.st_mode)) {
DIR *dir = opendir(source);
if(!dir) return 0;
mkdir(target, 0777);
struct dirent *d;
int result = 1;
while((d = readdir(dir))) {
if(!strcmp(d->d_name,".")) continue;
if(!strcmp(d->d_name,"..")) continue;
char *subsource = string_format("%s/%s",source,d->d_name);
char *subtarget = string_format("%s/%s",target,d->d_name);
result = link_recursive(subsource,subtarget);
free(subsource);
free(subtarget);
if(!result) break;
}
closedir(dir);
return result;
} else {
if(link(source, target)==0) return 1;
/*
If the hard link failed, perhaps because the source
was a directory, or if hard links are not supported
in that file system, fall back to a symlink.
*/
if(symlinks_enabled) {
/*
Use an absolute path when symlinking, otherwise the link will
be accidentally relative to the current directory.
*/
char *cwd = path_getcwd();
char *absolute_source = string_format("%s/%s", cwd, source);
int result = symlink(absolute_source, target);
free(absolute_source);
free(cwd);
if(result==0) return 1;
}
return 0;
}
}
/*
Start executing the given process on the local host,
accounting for the resources as necessary.
*/
static int start_process( struct work_queue_process *p )
{
pid_t pid;
struct work_queue_task *t = p->task;
cores_allocated += t->resources_requested->cores;
memory_allocated += t->resources_requested->memory;
disk_allocated += t->resources_requested->disk;
gpus_allocated += t->resources_requested->gpus;
if(t->resources_requested->gpus>0) {
work_queue_gpus_allocate(t->resources_requested->gpus,t->taskid);
}
if (container_mode == CONTAINER_MODE_DOCKER)
pid = work_queue_process_execute(p, container_mode, img_name);
else if (container_mode == CONTAINER_MODE_DOCKER_PRESERVE)
pid = work_queue_process_execute(p, container_mode, container_name);
else
pid = work_queue_process_execute(p, container_mode);
if(pid<0) fatal("unable to fork process for taskid %d!",p->task->taskid);
itable_insert(procs_running,pid,p);
return 1;
}
/*
Transmit the results of the given process to the manager.
If a local worker, stream the output from disk.
If a foreman, send the outputs contained in the task structure.
*/
static void report_task_complete( struct link *manager, struct work_queue_process *p )
{
int64_t output_length;
struct stat st;
if(worker_mode==WORKER_MODE_WORKER) {
fstat(p->output_fd, &st);
output_length = st.st_size;
lseek(p->output_fd, 0, SEEK_SET);
send_manager_message(manager, "result %d %d %lld %llu %d\n", p->task_status, p->exit_status, (long long) output_length, (unsigned long long) p->execution_end-p->execution_start, p->task->taskid);
link_stream_from_fd(manager, p->output_fd, output_length, time(0)+active_timeout);
total_task_execution_time += (p->execution_end - p->execution_start);
total_tasks_executed++;
} else {
struct work_queue_task *t = p->task;
if(t->output) {
output_length = strlen(t->output);
} else {
output_length = 0;
}
send_manager_message(manager, "result %d %d %lld %llu %d\n", t->result, t->return_status, (long long) output_length, (unsigned long long) t->time_workers_execute_last, t->taskid);
if(output_length) {
link_putlstring(manager, t->output, output_length, time(0)+active_timeout);
}
total_task_execution_time += t->time_workers_execute_last;
total_tasks_executed++;
}
get_task_tlq_url(p->task);
send_stats_update(manager);
}
/*
Remove one item from an itable, ignoring the key
*/
static void * itable_pop(struct itable *t )
{
uint64_t key;
void *value;
itable_firstkey(t);
if(itable_nextkey(t, &key, (void*)&value)) {
return itable_remove(t,key);
} else {
return 0;
}
}
/*
For every unreported complete task and watched file,
send the results to the manager.
*/
static void report_tasks_complete( struct link *manager )
{
struct work_queue_process *p;
while((p=itable_pop(procs_complete))) {
report_task_complete(manager,p);
}
work_queue_watcher_send_changes(watcher,manager,time(0)+active_timeout);
send_manager_message(manager, "end\n");
results_to_be_sent_msg = 0;
}
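/*
Kill any running process whose task has passed its requested end time,
marking the task result as a timeout.
*/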
static void expire_procs_running() {
struct work_queue_process *p;
uint64_t pid;
timestamp_t current_time = timestamp_get();
itable_firstkey(procs_running);
while(itable_nextkey(procs_running, (uint64_t*)&pid, (void**)&p)) {
if(p->task->resources_requested->end > 0 && current_time > (uint64_t) p->task->resources_requested->end)
{
p->task_status = WORK_QUEUE_RESULT_TASK_TIMEOUT;
kill(pid, SIGKILL);
}
}
}
/*
Scan over all of the processes known by the worker,
and if they have exited, move them into the procs_complete table
for later processing.
*/
static int handle_tasks(struct link *manager)
{
struct work_queue_process *p;
pid_t pid;
int status;
itable_firstkey(procs_running);
while(itable_nextkey(procs_running, (uint64_t*)&pid, (void**)&p)) {
int result = wait4(pid, &status, WNOHANG, &p->rusage);
if(result==0) {
// pid is still going
} else if(result<0) {
debug(D_WQ, "wait4 on pid %d returned an error: %s",pid,strerror(errno));
} else if(result>0) {
if (!WIFEXITED(status)){
p->exit_status = WTERMSIG(status);
debug(D_WQ, "task %d (pid %d) exited abnormally with signal %d",p->task->taskid,p->pid,p->exit_status);
} else {
p->exit_status = WEXITSTATUS(status);
FILE *loop_full_check;
char *buf = malloc(PATH_MAX);
char *pwd = getcwd(buf, PATH_MAX);
char *disk_alloc_filename = work_queue_generate_disk_alloc_full_filename(pwd, p->task->taskid);
if(p->loop_mount == 1 && (loop_full_check = fopen(disk_alloc_filename, "r"))) {
p->task_status = WORK_QUEUE_RESULT_DISK_ALLOC_FULL;
p->task->disk_allocation_exhausted = 1;
fclose(loop_full_check);
unlink(disk_alloc_filename);
}
free(buf);
free(disk_alloc_filename);
debug(D_WQ, "task %d (pid %d) exited normally with exit code %d",p->task->taskid,p->pid,p->exit_status);
}
p->execution_end = timestamp_get();
cores_allocated -= p->task->resources_requested->cores;
memory_allocated -= p->task->resources_requested->memory;
disk_allocated -= p->task->resources_requested->disk;
gpus_allocated -= p->task->resources_requested->gpus;
work_queue_gpus_free(p->task->taskid);
itable_remove(procs_running, p->pid);
itable_firstkey(procs_running);
// Output files must be moved back into the cache directory.
struct work_queue_file *f;
list_first_item(p->task->output_files);
while((f = list_next_item(p->task->output_files))) {
char *sandbox_name = string_format("%s/%s",p->sandbox,f->remote_name);
debug(D_WQ,"moving output file from %s to %s",sandbox_name,f->payload);
/* First we try a cheap rename. If that does not work, we try to copy the file. */
if(rename(sandbox_name,f->payload) == -1) {
debug(D_WQ, "could not rename output file %s to %s: %s",sandbox_name,f->payload,strerror(errno));
if(copy_file_to_file(sandbox_name, f->payload) == -1) {
debug(D_WQ, "could not copy output file %s to %s: %s",sandbox_name,f->payload,strerror(errno));
}
}
free(sandbox_name);
}
itable_insert(procs_complete, p->task->taskid, p);
}
}
return 1;
}
/**
* Stream file/directory contents for the recursive get/put protocol.
* Format:
* for a directory: a new line in the format of "dir $DIR_NAME 0"
* for a file: a new line in the format of "file $FILE_NAME $FILE_LENGTH"
* then file contents.
* string "end" at the end of the stream (on a new line).
*
* Example:
* Assume we have the following directory structure:
* mydir
* -- 1.txt
* -- 2.txt
* -- mysubdir
* -- a.txt
* -- b.txt
* -- z.jpg
*
* The stream contents would be:
*
* dir mydir 0
* file 1.txt $file_len
* $$ FILE 1.txt's CONTENTS $$
* file 2.txt $file_len
* $$ FILE 2.txt's CONTENTS $$
* dir mysubdir 0
* file mysubdir/a.txt $file_len
* $$ FILE mysubdir/a.txt's CONTENTS $$
* file mysubdir/b.txt $file_len
* $$ FILE mysubdir/b.txt's CONTENTS $$
* file z.jpg $file_len
* $$ FILE z.jpg's CONTENTS $$
* end
*
*/
static int stream_output_item(struct link *manager, const char *filename, int recursive)
{
DIR *dir;
struct dirent *dent;
char dentline[WORK_QUEUE_LINE_MAX];
char cached_filename[WORK_QUEUE_LINE_MAX];
struct stat info;
int64_t actual, length;
int fd;
string_nformat(cached_filename, sizeof(cached_filename), "cache/%s", filename);
if(stat(cached_filename, &info) != 0) {
goto failure;
}
if(S_ISDIR(info.st_mode)) {
// stream a directory
dir = opendir(cached_filename);
if(!dir) {
goto failure;
}
send_manager_message(manager, "dir %s 0\n", filename);
while(recursive && (dent = readdir(dir))) {
if(!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
continue;
string_nformat(dentline, sizeof(dentline), "%s/%s", filename, dent->d_name);
stream_output_item(manager, dentline, recursive);
}
closedir(dir);
} else {
// stream a file
fd = open(cached_filename, O_RDONLY, 0);
if(fd >= 0) {
length = info.st_size;
send_manager_message(manager, "file %s %"PRId64"\n", filename, length );
actual = link_stream_from_fd(manager, fd, length, time(0) + active_timeout);
close(fd);
if(actual != length) {
debug(D_WQ, "Sending back output file - %s failed: bytes to send = %"PRId64" and bytes actually sent = %"PRId64".", filename, length, actual);
return 0;
}
} else {
goto failure;
}
}
return 1;
failure:
send_manager_message(manager, "missing %s %d\n", filename, errno);
return 0;
}
/*
For each of the files and directories needed by a task, link
them into the sandbox. Return true if successful.
*/
int setup_sandbox( struct work_queue_process *p )
{
struct work_queue_file *f;
list_first_item(p->task->input_files);
while((f = list_next_item(p->task->input_files))) {
char *sandbox_name = string_format("%s/%s",skip_dotslash(p->sandbox),f->remote_name);
int result = 0;
// remote name may contain relative path components, so create them in advance
create_dir_parents(sandbox_name,0777);
if(f->type == WORK_QUEUE_DIRECTORY) {
debug(D_WQ,"creating directory %s",sandbox_name);
result = create_dir(sandbox_name, 0700);
if(!result) debug(D_WQ,"couldn't create directory %s: %s", sandbox_name, strerror(errno));
} else {
debug(D_WQ,"linking %s to %s",f->payload,sandbox_name);
result = link_recursive(skip_dotslash(f->payload),skip_dotslash(sandbox_name));
if(!result) {
if(errno==EEXIST) {
// XXX silently ignore the case where the target file exists.
// This happens when the manager's apps map the same input file twice, or to the same name.
// Would be better to reject this at the manager instead.
result = 1;
} else {
debug(D_WQ,"couldn't link %s into sandbox as %s: %s",f->payload,sandbox_name,strerror(errno));
}
}
}
free(sandbox_name);
if(!result) return 0;
}
return 1;
}
/*
For a task run locally, if the resources are all set to -1,
then assume that the task occupies all worker resources.
Otherwise, just make sure all values are non-negative.
*/
static void normalize_resources( struct work_queue_process *p )
{
struct work_queue_task *t = p->task;
if(t->resources_requested->cores < 0 && t->resources_requested->memory < 0 && t->resources_requested->disk < 0 && t->resources_requested->gpus < 0) {
t->resources_requested->cores = local_resources->cores.total;
t->resources_requested->memory = local_resources->memory.total;
t->resources_requested->disk = local_resources->disk.total;
t->resources_requested->gpus = local_resources->gpus.total;
} else {
t->resources_requested->cores = MAX(t->resources_requested->cores, 0);
t->resources_requested->memory = MAX(t->resources_requested->memory, 0);
t->resources_requested->disk = MAX(t->resources_requested->disk, 0);
t->resources_requested->gpus = MAX(t->resources_requested->gpus, 0);
}
}
/*
Handle an incoming task message from the manager.
Generate a work_queue_process wrapped around a work_queue_task,
and deposit it into the waiting list or the foreman_q as appropriate.
*/
static int do_task( struct link *manager, int taskid, time_t stoptime )
{
char line[WORK_QUEUE_LINE_MAX];
char filename[WORK_QUEUE_LINE_MAX];
char localname[WORK_QUEUE_LINE_MAX];
char taskname[WORK_QUEUE_LINE_MAX];
char taskname_encoded[WORK_QUEUE_LINE_MAX];
char category[WORK_QUEUE_LINE_MAX];
int flags, length;
int64_t n;
int disk_alloc = disk_allocation;
timestamp_t nt;
struct work_queue_task *task = work_queue_task_create(0);
task->taskid = taskid;
while(recv_manager_message(manager,line,sizeof(line),stoptime)) {
if(!strcmp(line,"end")) {
break;
} else if(sscanf(line, "category %s",category)) {
work_queue_task_specify_category(task, category);
} else if(sscanf(line,"cmd %d",&length)==1) {
char *cmd = malloc(length+1);
link_read(manager,cmd,length,stoptime);
cmd[length] = 0;
work_queue_task_specify_command(task,cmd);
debug(D_WQ,"rx from manager: %s",cmd);
free(cmd);
} else if(sscanf(line,"infile %s %s %d", filename, taskname_encoded, &flags)) {
string_nformat(localname, sizeof(localname), "cache/%s", filename);
url_decode(taskname_encoded, taskname, WORK_QUEUE_LINE_MAX);
work_queue_task_specify_file(task, localname, taskname, WORK_QUEUE_INPUT, flags);
} else if(sscanf(line,"outfile %s %s %d", filename, taskname_encoded, &flags)) {
string_nformat(localname, sizeof(localname), "cache/%s", filename);
url_decode(taskname_encoded, taskname, WORK_QUEUE_LINE_MAX);
work_queue_task_specify_file(task, localname, taskname, WORK_QUEUE_OUTPUT, flags);
} else if(sscanf(line, "dir %s", filename)) {
work_queue_task_specify_directory(task, filename, filename, WORK_QUEUE_INPUT, 0700, 0);
} else if(sscanf(line,"cores %" PRId64,&n)) {
work_queue_task_specify_cores(task, n);
} else if(sscanf(line,"memory %" PRId64,&n)) {
work_queue_task_specify_memory(task, n);
} else if(sscanf(line,"disk %" PRId64,&n)) {
work_queue_task_specify_disk(task, n);
} else if(sscanf(line,"gpus %" PRId64,&n)) {
work_queue_task_specify_gpus(task, n);
} else if(sscanf(line,"wall_time %" PRIu64,&nt)) {
work_queue_task_specify_running_time(task, nt);
} else if(sscanf(line,"end_time %" PRIu64,&nt)) {
work_queue_task_specify_end_time(task, nt);
} else if(sscanf(line,"env %d",&length)==1) {
char *env = malloc(length+2); /* +2 for \n and \0 */
link_read(manager, env, length+1, stoptime);
env[length] = 0; /* replace \n with \0 */
char *value = strchr(env,'=');
if(value) {
*value = 0;
value++;
work_queue_task_specify_environment_variable(task,env,value);
}
free(env);
} else {
debug(D_WQ|D_NOTICE,"invalid command from manager: %s",line);
return 0;
}
}
last_task_received = task->taskid;
struct work_queue_process *p = work_queue_process_create(task, disk_alloc);
if(!p) {
return 0;
}
// Every received task goes into procs_table.
itable_insert(procs_table,taskid,p);
if(worker_mode==WORKER_MODE_FOREMAN) {
work_queue_submit_internal(foreman_q,task);
} else {
// XXX sandbox setup should be done in task execution,
// so that it can be returned cleanly as a failure to execute.
if(!setup_sandbox(p)) {
itable_remove(procs_table,taskid);
work_queue_process_delete(p);
return 0;
}
normalize_resources(p);
list_push_tail(procs_waiting,p);
}
work_queue_watcher_add_process(watcher,p);
return 1;
}
/*
Return false if name is invalid as a simple filename.
For example, if it contains a slash, which would escape
the current working directory.
*/
int is_valid_filename( const char *name )
{
if(strchr(name,'/')) return 0;
return 1;
}
/*
Handle an incoming symbolic link inside the rput protocol.
The filename of the symlink was already given in the message,
and the target of the symlink is given as the "body" which
must be read off of the wire. The symlink target does not
need to be url_decoded because it is sent in the body.
*/
static int do_put_symlink_internal( struct link *manager, char *filename, int length )
{
char *target = malloc(length);
int actual = link_read(manager,target,length,time(0)+active_timeout);
if(actual!=length) {
free(target);
return 0;
}
int result = symlink(target,filename);
if(result<0) {
debug(D_WQ,"could not create symlink %s: %s",filename,strerror(errno));
free(target);
return 0;
}
free(target);
return 1;
}
/*
Handle an incoming file inside the rput protocol.
Notice that we trust the caller to have created
the necessary parent directories and checked the
name for validity.
*/
static int do_put_file_internal( struct link *manager, char *filename, int64_t length, int mode )
{
if(!check_disk_space_for_filesize(".", length, 0)) {
debug(D_WQ, "Could not put file %s, not enough disk space (%"PRId64" bytes needed)\n", filename, length);
return 0;
}
/* Ensure that worker can access the file! */
mode = mode | 0600;
int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, mode);
if(fd<0) {
debug(D_WQ, "Could not open %s for writing. (%s)\n", filename, strerror(errno));
return 0;
}
int64_t actual = link_stream_to_fd(manager, fd, length, time(0) + active_timeout);
close(fd);
if(actual!=length) {
debug(D_WQ, "Failed to put file - %s (%s)\n", filename, strerror(errno));
return 0;
}
return 1;
}
/*
Handle an incoming directory inside the recursive dir protocol.
Notice that we have already checked the dirname for validity,
and now we process "put" and "dir" commands within the list
until "end" is reached. Note that "put" is used instead of
"file" for historical reasons, to support recursive reuse
of existing code.
*/
static int do_put_dir_internal( struct link *manager, char *dirname )
{
char line[WORK_QUEUE_LINE_MAX];
char name_encoded[WORK_QUEUE_LINE_MAX];
char name[WORK_QUEUE_LINE_MAX];
int64_t size;
int mode;
int result = mkdir(dirname,0777);
if(result<0) {
debug(D_WQ,"unable to create %s: %s",dirname,strerror(errno));
return 0;
}
while(1) {
if(!recv_manager_message(manager,line,sizeof(line),time(0)+active_timeout)) return 0;
int r = 0;
if(sscanf(line,"put %s %" SCNd64 " %o",name_encoded,&size,&mode)==3) {
url_decode(name_encoded,name,sizeof(name));
if(!is_valid_filename(name)) return 0;
char *subname = string_format("%s/%s",dirname,name);
r = do_put_file_internal(manager,subname,size,mode);
free(subname);
} else if(sscanf(line,"symlink %s %" SCNd64,name_encoded,&size)==2) {
url_decode(name_encoded,name,sizeof(name));
if(!is_valid_filename(name)) return 0;
char *subname = string_format("%s/%s",dirname,name);
r = do_put_symlink_internal(manager,subname,size);
free(subname);
} else if(sscanf(line,"dir %s",name_encoded)==1) {
url_decode(name_encoded,name,sizeof(name));
if(!is_valid_filename(name)) return 0;
char *subname = string_format("%s/%s",dirname,name);
r = do_put_dir_internal(manager,subname);
free(subname);
} else if(!strcmp(line,"end")) {
break;
}
if(!r) return 0;
}
return 1;
}
static int do_put_dir( struct link *manager, char *dirname )
{
if(!is_valid_filename(dirname)) return 0;
char * cachename = string_format("cache/%s",dirname);
int result = do_put_dir_internal(manager,cachename);
free(cachename);
return result;
}
/*
This is the old method for sending a single file.
It works, but it has the deficiency that the manager
expects the worker to create all parent directories
for the file, which is horrifically expensive when
sending a large directory tree. The directory put
protocol (above) is preferred instead.
*/
static int do_put_single_file( struct link *manager, char *filename, int64_t length, int mode )
{
if(!path_within_dir(filename, workspace)) {
debug(D_WQ, "Path - %s is not within workspace %s.", filename, workspace);
return 0;
}
char * cached_filename = string_format("cache/%s",filename);
if(strchr(filename,'/')) {
char dirname[WORK_QUEUE_LINE_MAX];
path_dirname(filename,dirname);
if(!create_dir(dirname,0777)) {
debug(D_WQ, "could not create directory %s: %s",dirname,strerror(errno));
free(cached_filename);
return 0;
}
}
int result = do_put_file_internal(manager,cached_filename,length,mode);
free(cached_filename);
return result;
}
static int file_from_url(const char *url, const char *filename) {
debug(D_WQ, "Retrieving %s from (%s)\n", filename, url);
char command[WORK_QUEUE_LINE_MAX];
string_nformat(command, sizeof(command), "curl -f -o \"%s\" \"%s\"", filename, url);
if (system(command) == 0) {
debug(D_WQ, "Success, file retrieved from %s\n", url);
} else {
debug(D_WQ, "Failed to retrieve file from %s\n", url);
return 0;
}
return 1;
}
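/*
Handle an incoming "url" transfer: read the URL string of the given
length from the manager link, then fetch it into the cache directory
via file_from_url above.
*/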
static int do_url(struct link* manager, const char *filename, int length, int mode) {
char url[WORK_QUEUE_LINE_MAX];
link_read(manager, url, length, time(0) + active_timeout);
char cache_name[WORK_QUEUE_LINE_MAX];
string_nformat(cache_name, sizeof(cache_name), "cache/%s", filename);
return file_from_url(url, cache_name);
}
static int do_tlq_url(const char *manager_tlq_url) {
debug(D_TLQ, "set manager TLQ URL: %s", manager_tlq_url);
return 1;
}
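/*
Remove a cached file or directory named by the manager.
The path is confined to the cache within the workspace,
and a path that does not exist counts as success.
*/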
static int do_unlink(const char *path)
{
char cached_path[WORK_QUEUE_LINE_MAX];
string_nformat(cached_path, sizeof(cached_path), "cache/%s", path);
if(!path_within_dir(cached_path, workspace)) {
debug(D_WQ, "%s is not within workspace %s",cached_path,workspace);
return 0;
}
//Use delete_dir() since it calls unlink() if path is a file.
if(delete_dir(cached_path) != 0) {
struct stat buf;
if(stat(cached_path, &buf) != 0) {
if(errno == ENOENT) {
// If the path does not exist, return success
return 1;
}
}
// Failed to do unlink
return 0;
}
return 1;
}
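/*
Stream a file (or a directory, if recursive is set) back to the
manager, terminated by an "end" message.
*/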
static int do_get(struct link *manager, const char *filename, int recursive) {
stream_output_item(manager, filename, recursive);
send_manager_message(manager, "end\n");
return 1;
}
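/*
Fetch a file into the cache from a third-party location rather than
from the manager itself: by symlink, by copy, or by running an
arbitrary command, depending on the mode.
*/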
static int do_thirdget(int mode, char *filename, const char *path) {
char cmd[WORK_QUEUE_LINE_MAX];
char cached_filename[WORK_QUEUE_LINE_MAX];
char *cur_pos;
char *cmd_tmp;
struct stat info;
if(mode != WORK_QUEUE_FS_CMD) {
if(stat(path, &info) != 0) {
debug(D_WQ, "Path %s not accessible. (%s)\n", path, strerror(errno));
return 0;
}
if(!strcmp(filename, path)) {
debug(D_WQ, "thirdget aborted: filename (%s) and path (%s) are the same\n", filename, path);
return 1;
}
}
cur_pos = filename;
while(!strncmp(cur_pos, "./", 2)) {
cur_pos += 2;
}
string_nformat(cached_filename, sizeof(cached_filename), "cache/%s", cur_pos);
cur_pos = strrchr(cached_filename, '/');
if(cur_pos) {
*cur_pos = '\0';
if(!create_dir(cached_filename, mode | 0700)) {
debug(D_WQ, "Could not create directory - %s (%s)\n", cached_filename, strerror(errno));
return 0;
}
*cur_pos = '/';
}
if(stat(cached_filename, &info) == 0) {
/* file is already present */
return 1;
}
switch (mode) {
case WORK_QUEUE_FS_SYMLINK:
if(symlink(path, cached_filename) != 0) {
debug(D_WQ, "Could not thirdget %s, symlink (%s) failed. (%s)\n", filename, path, strerror(errno));
return 0;
}
/* falls through */
case WORK_QUEUE_FS_PATH:
string_nformat(cmd, sizeof(cmd), "/bin/cp %s %s", path, cached_filename);
if(system(cmd) != 0) {
debug(D_WQ, "Could not thirdget %s, copy (%s) failed. (%s)\n", filename, path, strerror(errno));
return 0;
}
break;
case WORK_QUEUE_FS_CMD:
cmd_tmp = string_replace_percents(path, cached_filename);
string_nformat(cmd, sizeof(cmd), "%s", cmd_tmp);
free(cmd_tmp);
debug(D_WQ, "Transfering %s via cmd: %s", cached_filename, cmd);
if(system(cmd) != 0) {
debug(D_WQ, "Could not thirdget %s, command (%s) failed. (%s)\n", filename, cmd, strerror(errno));
return 0;
}
break;
}
return 1;
}
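/*
Export a cached file to a third-party location by copy or by running
a command, then report the result back to the manager with a
"thirdput-complete" message.
*/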
static int do_thirdput(struct link *manager, int mode, char *filename, const char *path) {
struct stat info;
char cmd[WORK_QUEUE_LINE_MAX];
char cached_filename[WORK_QUEUE_LINE_MAX];
char *cur_pos;
int result = 1;
cur_pos = filename;
while(!strncmp(cur_pos, "./", 2)) {
cur_pos += 2;
}
string_nformat(cached_filename, sizeof(cached_filename), "cache/%s", cur_pos);
if(stat(cached_filename, &info) != 0) {
debug(D_WQ, "File %s not accessible. (%s)\n", cached_filename, strerror(errno));
result = 0;
}
switch (mode) {
case WORK_QUEUE_FS_SYMLINK:
case WORK_QUEUE_FS_PATH:
if(!strcmp(filename, path)) {
debug(D_WQ, "thirdput aborted: filename (%s) and path (%s) are the same\n", filename, path);
result = 1;
}
cur_pos = strrchr(path, '/');
if(cur_pos) {
*cur_pos = '\0';
if(!create_dir(path, mode | 0700)) {
debug(D_WQ, "Could not create directory - %s (%s)\n", path, strerror(errno));
result = 0;
*cur_pos = '/';
break;
}
*cur_pos = '/';
}
string_nformat(cmd, sizeof(cmd), "/bin/cp -r %s %s", cached_filename, path);
if(system(cmd) != 0) {
debug(D_WQ, "Could not thirdput %s, copy (%s) failed. (%s)\n", cached_filename, path, strerror(errno));
result = 0;
}
break;
case WORK_QUEUE_FS_CMD:
string_nformat(cmd, sizeof(cmd), "%s < %s", path, cached_filename);
if(system(cmd) != 0) {
debug(D_WQ, "Could not thirdput %s, command (%s) failed. (%s)\n", filename, cmd, strerror(errno));
result = 0;
}
break;
}
send_manager_message(manager, "thirdput-complete %d\n", result);
return result;
}
/*
do_kill removes a process currently known by the worker.
Note that a kill message from the manager is used for every case
where a task is to be removed, whether it is waiting, running,
of finished. Regardless of the state, we kill the process and
remove all of the associated files and other state.
*/
static int do_kill(int taskid)
{
struct work_queue_process *p;
p = itable_remove(procs_table, taskid);
if(!p) {
debug(D_WQ,"manager requested kill of task %d which does not exist!",taskid);
return 1;
}
if(worker_mode == WORKER_MODE_FOREMAN) {
work_queue_cancel_by_taskid(foreman_q, taskid);
} else {
if(itable_remove(procs_running, p->pid)) {
work_queue_process_kill(p);
cores_allocated -= p->task->resources_requested->cores;
memory_allocated -= p->task->resources_requested->memory;
disk_allocated -= p->task->resources_requested->disk;
gpus_allocated -= p->task->resources_requested->gpus;
work_queue_gpus_free(taskid);
}
}
itable_remove(procs_complete, p->task->taskid);
list_remove(procs_waiting,p);
work_queue_watcher_remove_process(watcher,p);
work_queue_process_delete(p);
return 1;
}
/*
Kill off all known tasks by iterating over the complete
procs_table and calling do_kill. This should result in
all empty procs_* structures and zero resources allocated.
If this fails to bring the system back to a fresh state,
then we need to abort to clean things up.
*/
static void kill_all_tasks() {
struct work_queue_process *p;
uint64_t taskid;
itable_firstkey(procs_table);
while(itable_nextkey(procs_table,&taskid,(void**)&p)) {
do_kill(taskid);
}
assert(itable_size(procs_table)==0);
assert(itable_size(procs_running)==0);
assert(itable_size(procs_complete)==0);
assert(list_size(procs_waiting)==0);
assert(cores_allocated==0);
assert(memory_allocated==0);
assert(disk_allocated==0);
assert(gpus_allocated==0);
debug(D_WQ,"all data structures are clean");
}
/* Remove a file, even when marked as cached. A foreman broadcasts this message to
 * foremen down its hierarchy. It is invalid for a regular worker to receive this message. */
static int do_invalidate_file(const char *filename) {
if(worker_mode == WORKER_MODE_FOREMAN) {
work_queue_invalidate_cached_file_internal(foreman_q, filename);
return 1;
}
return -1;
}
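/*
Mark a running process with the given result code and kill it with
SIGKILL, so that it is reaped through the normal completion path.
*/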
static void finish_running_task(struct work_queue_process *p, work_queue_result_t result) {
p->task_status |= result;
kill(p->pid, SIGKILL);
}
static void finish_running_tasks(work_queue_result_t result) {
struct work_queue_process *p;
pid_t pid;
itable_firstkey(procs_running);
while(itable_nextkey(procs_running, (uint64_t*) &pid, (void**)&p)) {
finish_running_task(p, result);
}
}
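/*
Measure the sandbox disk usage of a single process and return false
if it has exceeded the disk allocation the task requested.
*/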
static int enforce_process_limits(struct work_queue_process *p) {
/* If the task did not specify disk usage, return right away. */
if(p->disk < 1)
return 1;
work_queue_process_measure_disk(p, max_time_on_measurement);
if(p->sandbox_size > p->task->resources_requested->disk) {
debug(D_WQ,"Task %d went over its disk size limit: %s > %s\n",
p->task->taskid,
rmsummary_resource_to_str(p->sandbox_size, /* with units */ 1),
rmsummary_resource_to_str(p->task->resources_requested->disk, 1));
return 0;
}
return 1;
}
static int enforce_processes_limits() {
static time_t last_check_time = 0;
struct work_queue_process *p;
pid_t pid;
int ok = 1;
/* Do not check too often, as it is expensive (particularly disk) */
if((time(0) - last_check_time) < check_resources_interval ) return 1;
itable_firstkey(procs_table);
while(itable_nextkey(procs_table,(uint64_t*)&pid,(void**)&p)) {
if(!enforce_process_limits(p)) {
finish_running_task(p, WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION);
/* We delete the sandbox to free the exhausted resource. If a loop device is used, remove the loop device instead. */
if(p->loop_mount == 1) {
disk_alloc_delete(p->sandbox);
}
else {
delete_dir(p->sandbox);
}
ok = 0;
}
}
last_check_time = time(0);
return ok;
}
/* We check maximum_running_time by itself (not in enforce_processes_limits),
* as other running tasks should not be affected by a task timeout. */
static void enforce_processes_max_running_time() {
struct work_queue_process *p;
pid_t pid;
timestamp_t now = timestamp_get();
itable_firstkey(procs_running);
while(itable_nextkey(procs_running, (uint64_t*) &pid, (void**) &p)) {
/* If the task did not specify wall_time, return right away. */
if(p->task->resources_requested->wall_time < 1)
continue;
if(now < p->execution_start + p->task->resources_requested->wall_time) {
debug(D_WQ,"Task %d went over its running time limit: %s > %s\n",
p->task->taskid,
rmsummary_resource_to_str("wall_time", now - p->execution_start, 1),
rmsummary_resource_to_str("wall_time", p->task->resources_requested->wall_time, 1));
p->task_status = WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME;
kill(pid, SIGKILL);
}
}
return;
}
static int do_release() {
debug(D_WQ, "released by manager %s:%d.\n", current_manager_address->addr, current_manager_address->port);
released_by_manager = 1;
return 0;
}
static void disconnect_manager(struct link *manager) {
debug(D_WQ, "disconnecting from manager %s:%d", current_manager_address->addr, current_manager_address->port);
link_close(manager);
debug(D_WQ, "killing all outstanding tasks");
kill_all_tasks();
//KNOWN HACK: We remove all workers on a manager disconnection to avoid
//returning old tasks to a new manager.
if(foreman_q) {
debug(D_WQ, "Disconnecting all workers...\n");
release_all_workers(foreman_q);
if(project_regex) {
update_catalog(foreman_q, manager, 1);
}
}
if(released_by_manager) {
released_by_manager = 0;
} else if(abort_flag) {
// Bail out quickly
} else {
sleep(5);
}
}
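/*
Read the next message from the manager and dispatch it to the
appropriate do_* handler above. Returns false if the message could
not be handled or the link failed.
*/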
static int handle_manager(struct link *manager) {
char line[WORK_QUEUE_LINE_MAX];
char filename_encoded[WORK_QUEUE_LINE_MAX];
char filename[WORK_QUEUE_LINE_MAX];
char manager_tlq_url[WORK_QUEUE_LINE_MAX];
char path[WORK_QUEUE_LINE_MAX];
int64_t length;
int64_t taskid = 0;
int mode, r, n;
if(recv_manager_message(manager, line, sizeof(line), idle_stoptime )) {
if(sscanf(line,"task %" SCNd64, &taskid)==1) {
r = do_task(manager, taskid,time(0)+active_timeout);
} else if(sscanf(line,"put %s %"SCNd64" %o",filename_encoded,&length,&mode)==3) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_put_single_file(manager, filename, length, mode);
reset_idle_timer();
} else if(sscanf(line, "dir %s", filename_encoded)==1) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_put_dir(manager,filename);
reset_idle_timer();
} else if(sscanf(line, "url %s %" SCNd64 " %o", filename, &length, &mode) == 3) {
r = do_url(manager, filename, length, mode);
reset_idle_timer();
} else if(sscanf(line, "tlq %s", manager_tlq_url) == 1) {
r = do_tlq_url(manager_tlq_url);
reset_idle_timer();
} else if(sscanf(line, "unlink %s", filename_encoded) == 1) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_unlink(filename);
} else if(sscanf(line, "get %s %d", filename_encoded, &mode) == 2) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_get(manager, filename, mode);
} else if(sscanf(line, "thirdget %o %s %[^\n]", &mode, filename_encoded, path) == 3) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_thirdget(mode, filename, path);
} else if(sscanf(line, "thirdput %o %s %[^\n]", &mode, filename_encoded, path) == 3) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_thirdput(manager, mode, filename, path);
reset_idle_timer();
} else if(sscanf(line, "kill %" SCNd64, &taskid) == 1) {
if(taskid >= 0) {
r = do_kill(taskid);
} else {
kill_all_tasks();
r = 1;
}
} else if(sscanf(line, "invalidate-file %s", filename_encoded) == 1) {
url_decode(filename_encoded,filename,sizeof(filename));
r = do_invalidate_file(filename);
} else if(!strncmp(line, "release", 8)) {
r = do_release();
} else if(!strncmp(line, "exit", 5)) {
work_queue_broadcast_message(foreman_q, "exit\n");
abort_flag = 1;
r = 1;
} else if(!strncmp(line, "check", 6)) {
r = send_keepalive(manager, 0);
} else if(!strncmp(line, "auth", 4)) {
fprintf(stderr,"work_queue_worker: this manager requires a password. (use the -P option)\n");
r = 0;
} else if(sscanf(line, "send_results %d", &n) == 1) {
report_tasks_complete(manager);
r = 1;
} else {
debug(D_WQ, "Unrecognized manager message: %s.\n", line);
r = 0;
}
} else {
debug(D_WQ, "Failed to read from manager.\n");
r = 0;
}
return r;
}
/*
Return true if this task can run with the resources currently available.
*/
static int task_resources_fit_now(struct work_queue_task *t)
{
return
(cores_allocated + t->resources_requested->cores <= local_resources->cores.total) &&
(memory_allocated + t->resources_requested->memory <= local_resources->memory.total) &&
(disk_allocated + t->resources_requested->disk <= local_resources->disk.total) &&
(gpus_allocated + t->resources_requested->gpus <= local_resources->gpus.total);
}
/*
Return true if this task can eventually run with the resources available. For
example, this is needed for when the worker is launched without the --memory
option, and the free available memory of the system is consumed by some other
process.
*/
static int task_resources_fit_eventually(struct work_queue_task *t)
{
struct work_queue_resources *r;
if(worker_mode == WORKER_MODE_FOREMAN) {
r = total_resources;
}
else {
r = local_resources;
}
return
(t->resources_requested->cores <= r->cores.largest) &&
(t->resources_requested->memory <= r->memory.largest) &&
(t->resources_requested->disk <= r->disk.largest) &&
(t->resources_requested->gpus <= r->gpus.largest);
}
void forsake_waiting_process(struct link *manager, struct work_queue_process *p) {
/* the task cannot run in this worker */
p->task_status = WORK_QUEUE_RESULT_FORSAKEN;
itable_insert(procs_complete, p->task->taskid, p);
debug(D_WQ, "Waiting task %d has been forsaken.", p->task->taskid);
/* we also send updated resources to the manager. */
send_keepalive(manager, 1);
}
/*
Returns 0 if the worker is using more resources than promised, 1 if resource usage holds that promise.
*/
static int enforce_worker_limits(struct link *manager) {
if( manual_wall_time_option > 0 && (time(0) - worker_start_time) > manual_wall_time_option) {
fprintf(stderr,"work_queue_worker: reached the wall time limit %"PRIu64" s\n", (uint64_t)manual_wall_time_option);
if(manager) {
send_manager_message(manager, "info wall_time_exhausted %"PRIu64"\n", (uint64_t)manual_wall_time_option);
}
return 0;
}
if( manual_disk_option > 0 && local_resources->disk.inuse > manual_disk_option ) {
fprintf(stderr,"work_queue_worker: %s used more than declared disk space (--disk - < disk used) %"PRIu64" < %"PRIu64" MB\n", workspace, manual_disk_option, local_resources->disk.inuse);
if(manager) {
send_manager_message(manager, "info disk_exhausted %lld\n", (long long) local_resources->disk.inuse);
}
return 0;
}
if(manual_memory_option > 0 && local_resources->memory.inuse > manual_memory_option) {
fprintf(stderr,"work_queue_worker: used more than declared memory (--memory < memory used) %"PRIu64" < %"PRIu64" MB\n", manual_memory_option, local_resources->memory.inuse);
if(manager) {
send_manager_message(manager, "info memory_exhausted %lld\n", (long long) local_resources->memory.inuse);
}
return 0;
}
return 1;
}
/*
Returns 0 if the worker has fewer resources than promised, 1 otherwise.
*/
static int enforce_worker_promises(struct link *manager) {
if( manual_disk_option > 0 && local_resources->disk.total < manual_disk_option) {
fprintf(stderr,"work_queue_worker: has less than the promised disk space (--disk > disk total) %"PRIu64" < %"PRIu64" MB\n", manual_disk_option, local_resources->disk.total);
if(manager) {
send_manager_message(manager, "info disk_error %lld\n", (long long) local_resources->disk.total);
}
return 0;
}
return 1;
}
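/*
Main serving loop for an ordinary worker: alternate between handling
manager messages, reaping finished tasks, enforcing resource limits,
and starting waiting tasks whose resource requests currently fit.
*/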
static void work_for_manager(struct link *manager) {
sigset_t mask;
debug(D_WQ, "working for manager at %s:%d.\n", current_manager_address->addr, current_manager_address->port);
sigemptyset(&mask);
sigaddset(&mask, SIGCHLD);
sigaddset(&mask, SIGTERM);
sigaddset(&mask, SIGQUIT);
sigaddset(&mask, SIGINT);
sigaddset(&mask, SIGUSR1);
sigaddset(&mask, SIGUSR2);
reset_idle_timer();
time_t volatile_stoptime = time(0) + 60;
// Start serving managers
while(!abort_flag) {
if(time(0) > idle_stoptime) {
debug(D_NOTICE, "disconnecting from %s:%d because I did not receive any task in %d seconds (--idle-timeout).\n", current_manager_address->addr,current_manager_address->port,idle_timeout);
send_manager_message(manager, "info idle-disconnecting %lld\n", (long long) idle_timeout);
break;
}
if(worker_volatility && time(0) > volatile_stoptime) {
if( (double)rand()/(double)RAND_MAX < worker_volatility) {
debug(D_NOTICE, "work_queue_worker: disconnect from manager due to volatility check.\n");
break;
} else {
volatile_stoptime = time(0) + 60;
}
}
if (initial_ppid != 0 && getppid() != initial_ppid) {
debug(D_NOTICE, "parent process exited, shutting down\n");
break;
}
/*
link_usleep will cause the worker to sleep for a time until
interrupted by a SIGCHLD signal. However, the signal could
have been delivered while we were outside of the wait function,
setting sigchld_received_flag. In that case, do not block,
but proceed immediately.
There is still a (very small) race condition in that the
signal could be received between the check and link_usleep,
hence a maximum wait time of five seconds is enforced.
*/
int wait_msec = 5000;
if(sigchld_received_flag) {
wait_msec = 0;
sigchld_received_flag = 0;
}
int manager_activity = link_usleep_mask(manager, wait_msec*1000, &mask, 1, 0);
if(manager_activity < 0) break;
int ok = 1;
if(manager_activity) {
ok &= handle_manager(manager);
}
expire_procs_running();
ok &= handle_tasks(manager);
measure_worker_resources();
if(!enforce_worker_promises(manager)) {
abort_flag = 1;
break;
}
enforce_processes_max_running_time();
/* End a running process if it goes above its declared limits.
 * Mark the offending process as RESOURCE_EXHAUSTION. */
enforce_processes_limits();
/* End running processes if worker resources are exhausted, and mark
 * them as FORSAKEN, so they can be resubmitted somewhere else. */
if(!enforce_worker_limits(manager)) {
finish_running_tasks(WORK_QUEUE_RESULT_FORSAKEN);
// finish all tasks, disconnect from manager, but don't kill the worker (no abort_flag = 1)
break;
}
int task_event = 0;
if(ok) {
struct work_queue_process *p;
int visited;
int waiting = list_size(procs_waiting);
for(visited = 0; visited < waiting; visited++) {
p = list_pop_head(procs_waiting);
if(!p) {
break;
} else if(task_resources_fit_now(p->task)) {
start_process(p);
task_event++;
} else if(task_resources_fit_eventually(p->task)) {
list_push_tail(procs_waiting, p);
} else {
forsake_waiting_process(manager, p);
task_event++;
}
}
}
if(task_event > 0) {
send_stats_update(manager);
}
if(ok && !results_to_be_sent_msg) {
if(work_queue_watcher_check(watcher) || itable_size(procs_complete) > 0) {
send_manager_message(manager, "available_results\n");
results_to_be_sent_msg = 1;
}
}
if(!ok) {
break;
}
//Reset idle_stoptime if something interesting is happening at this worker.
if(list_size(procs_waiting) > 0 || itable_size(procs_table) > 0 || itable_size(procs_complete) > 0) {
reset_idle_timer();
}
}
}
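/*
Main serving loop for a foreman: wait for task results from the
internal work queue, record completed tasks, and handle messages
from the manager above.
*/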
static void foreman_for_manager(struct link *manager) {
int manager_active = 0;
if(!manager) {
return;
}
debug(D_WQ, "working for manager at %s:%d as foreman.\n", current_manager_address->addr, current_manager_address->port);
reset_idle_timer();
int prev_num_workers = 0;
while(!abort_flag) {
int result = 1;
struct work_queue_task *task = NULL;
if(time(0) > idle_stoptime && work_queue_empty(foreman_q)) {
debug(D_NOTICE, "giving up because did not receive any task in %d seconds.\n", idle_timeout);
send_manager_message(manager, "info idle-disconnecting %lld\n", (long long) idle_timeout);
break;
}
measure_worker_resources();
/* if the number of workers changed by more than 10%, send a status update */
int curr_num_workers = total_resources->workers.total;
if(10*abs(curr_num_workers - prev_num_workers) > prev_num_workers) {
send_keepalive(manager, 0);
}
prev_num_workers = curr_num_workers;
task = work_queue_wait_internal(foreman_q, foreman_internal_timeout, manager, &manager_active);
if(task) {
struct work_queue_process *p;
p = itable_lookup(procs_table,task->taskid);
if(!p) fatal("no entry in procs table for taskid %d",task->taskid);
itable_insert(procs_complete, task->taskid, p);
result = 1;
}
if(!results_to_be_sent_msg && itable_size(procs_complete) > 0)
{
send_manager_message(manager, "available_results\n");
results_to_be_sent_msg = 1;
}
if(manager_active) {
result &= handle_manager(manager);
reset_idle_timer();
}
if(!result) break;
}
}
/*
workspace_create is done once when the worker starts.
*/
static int workspace_create() {
char absolute[WORK_QUEUE_LINE_MAX];
// Setup working space(dir)
const char *workdir;
const char *workdir_tmp;
if (user_specified_workdir) {
workdir = user_specified_workdir;
} else if((workdir_tmp = getenv("_CONDOR_SCRATCH_DIR")) && access(workdir_tmp, R_OK|W_OK|X_OK) == 0) {
workdir = workdir_tmp;
} else if((workdir_tmp = getenv("TMPDIR")) && access(workdir_tmp, R_OK|W_OK|X_OK) == 0) {
workdir = workdir_tmp;
} else if((workdir_tmp = getenv("TEMP")) && access(workdir_tmp, R_OK|W_OK|X_OK) == 0) {
workdir = workdir_tmp;
} else if((workdir_tmp = getenv("TMP")) && access(workdir_tmp, R_OK|W_OK|X_OK) == 0) {
workdir = workdir_tmp;
} else {
workdir = "/tmp";
}
if(!workspace) {
workspace = string_format("%s/worker-%d-%d", workdir, (int) getuid(), (int) getpid());
}
printf( "work_queue_worker: creating workspace %s\n", workspace);
if(!create_dir(workspace,0777)) {
return 0;
}
path_absolute(workspace, absolute, 1);
free(workspace);
workspace = xxstrdup(absolute);
return 1;
}
/*
Create a test script and try to execute.
With this we check the scratch directory allows file execution.
*/
static int workspace_check() {
int error = 0; /* set 1 on error */
char *fname = string_format("%s/test.sh", workspace);
FILE *file = fopen(fname, "w");
if(!file) {
warn(D_NOTICE, "Could not write to %s", workspace);
error = 1;
} else {
fprintf(file, "#!/bin/sh\nexit 0\n");
fclose(file);
chmod(fname, 0755);
int exit_status = system(fname);
if(WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == 126) {
/* Note that we do not set error=1 on 126, as the executables may live outside the workspace. */
warn(D_NOTICE, "Could not execute a test script in the workspace directory '%s'.", workspace);
warn(D_NOTICE, "Is the filesystem mounted as 'noexec'?\n");
warn(D_NOTICE, "Unless the task command is an absolute path, the task will fail with exit status 126.\n");
} else if(!WIFEXITED(exit_status) || WEXITSTATUS(exit_status) != 0) {
error = 1;
}
}
unlink(fname);
free(fname);
if(error) {
warn(D_NOTICE, "The workspace %s could not be used.\n", workspace);
warn(D_NOTICE, "Use the --workdir command line switch to change where the workspace is created.\n");
}
return !error;
}
/*
workspace_prepare is called every time we connect to a new manager.
*/
static int workspace_prepare()
{
debug(D_WQ,"preparing workspace %s",workspace);
char *cachedir = string_format("%s/cache",workspace);
int result = create_dir(cachedir,0777);
free(cachedir);
char *tmp_name = string_format("%s/cache/tmp", workspace);
result |= create_dir(tmp_name,0777);
setenv("WORKER_TMPDIR", tmp_name, 1);
free(tmp_name);
return result;
}
/*
workspace_cleanup is called every time we disconnect from a manager,
to remove any state left over from a previous run.
*/
static void workspace_cleanup()
{
debug(D_WQ,"cleaning workspace %s",workspace);
delete_dir_contents(workspace);
}
/*
workspace_delete is called when the worker is about to exit,
so that all files are removed.
XXX the cleanup of internal data structures doesn't quite belong here.
*/
static void workspace_delete()
{
if(user_specified_workdir) free(user_specified_workdir);
if(os_name) free(os_name);
if(arch_name) free(arch_name);
if(foreman_q) work_queue_delete(foreman_q);
if(procs_running) itable_delete(procs_running);
if(procs_table) itable_delete(procs_table);
if(procs_complete) itable_delete(procs_complete);
if(procs_waiting) list_delete(procs_waiting);
if(watcher) work_queue_watcher_delete(watcher);
printf( "work_queue_worker: deleting workspace %s\n", workspace);
delete_dir(workspace);
free(workspace);
}
static int serve_manager_by_hostport( const char *host, int port, const char *verify_project )
{
if(!domain_name_cache_lookup(host,current_manager_address->addr)) {
fprintf(stderr,"couldn't resolve hostname %s",host);
return 0;
}
/*
For the preliminary steps of password and project verification, we use the
idle timeout, because we have not yet been assigned any work and should
leave if the manager is not responsive.
It is tempting to use a short timeout here, but DON'T. The name and
password messages are asynchronous; if the manager is busy handling other
workers, a short window is not enough for a response to come back.
*/
reset_idle_timer();
struct link *manager = link_connect(current_manager_address->addr,port,idle_stoptime);
if(!manager) {
fprintf(stderr,"couldn't connect to %s:%d: %s\n",current_manager_address->addr,port,strerror(errno));
return 0;
}
link_tune(manager,LINK_TUNE_INTERACTIVE);
char local_addr[LINK_ADDRESS_MAX];
int local_port;
link_address_local(manager, local_addr, &local_port);
printf("connected to manager %s:%d via local address %s:%d\n", host, port, local_addr, local_port);
debug(D_WQ, "connected to manager %s:%d via local address %s:%d", host, port, local_addr, local_port);
if(password) {
debug(D_WQ,"authenticating to manager");
if(!link_auth_password(manager,password,idle_stoptime)) {
fprintf(stderr,"work_queue_worker: wrong password for manager %s:%d\n",host,port);
link_close(manager);
return 0;
}
}
if(verify_project) {
char line[WORK_QUEUE_LINE_MAX];
debug(D_WQ, "verifying manager's project name");
send_manager_message(manager, "name\n");
if(!recv_manager_message(manager,line,sizeof(line),idle_stoptime)) {
debug(D_WQ,"no response from manager while verifying name");
link_close(manager);
return 0;
}
if(strcmp(line,verify_project)) {
fprintf(stderr, "work_queue_worker: manager has project %s instead of %s\n", line, verify_project);
link_close(manager);
return 0;
}
}
workspace_prepare();
measure_worker_resources();
report_worker_ready(manager);
if(worker_mode == WORKER_MODE_FOREMAN) {
foreman_for_manager(manager);
} else {
work_for_manager(manager);
}
if(abort_signal_received) {
send_manager_message(manager, "info vacating %d\n", abort_signal_received);
}
last_task_received = 0;
results_to_be_sent_msg = 0;
workspace_cleanup();
disconnect_manager(manager);
printf("disconnected from manager %s:%d\n", host, port );
return 1;
}
int serve_manager_by_hostport_list(struct list *manager_addresses) {
int result = 0;
/* keep trying managers in the list until all manager addresses
 * are tried, or a successful connection is made */
list_first_item(manager_addresses);
while((current_manager_address = list_next_item(manager_addresses))) {
result = serve_manager_by_hostport(current_manager_address->host,current_manager_address->port,0);
if(result) {
break;
}
}
return result;
}
static struct list *interfaces_to_list(const char *addr, int port, struct jx *ifas) {
struct list *l = list_create();
struct jx *ifa;
int found_canonical = 0;
for (void *i = NULL; (ifa = jx_iterate_array(ifas, &i));) {
const char *ifa_addr = jx_lookup_string(ifa, "host");
if(ifa_addr && strcmp(addr, ifa_addr) == 0) {
found_canonical = 1;
}
struct manager_address *m = calloc(1, sizeof(*m));
strncpy(m->host, ifa_addr, LINK_ADDRESS_MAX);
m->port = port;
list_push_tail(l, m);
}
if(ifas && !found_canonical) {
warn(D_NOTICE, "Did not find the manager address '%s' in the list of interfaces.", addr);
}
if(!found_canonical) {
/* We get here if no interfaces were defined, or if addr was not found in the interfaces. */
struct manager_address *m = calloc(1, sizeof(*m));
strncpy(m->host, addr, LINK_ADDRESS_MAX);
m->port = port;
list_push_tail(l, m);
}
return l;
}
static int serve_manager_by_name( const char *catalog_hosts, const char *project_regex )
{
struct list *managers_list = work_queue_catalog_query_cached(catalog_hosts,-1,project_regex);
debug(D_WQ,"project name %s matches %d managers",project_regex,list_size(managers_list));
if(list_size(managers_list)==0) return 0;
// rotate the list by r items to distribute the load across managers
int r = rand() % list_size(managers_list);
int i;
for(i=0;i<r;i++) {
list_push_tail(managers_list,list_pop_head(managers_list));
}
static struct manager_address *last_addr = NULL;
while(1) {
struct jx *jx = list_peek_head(managers_list);
const char *project = jx_lookup_string(jx,"project");
const char *name = jx_lookup_string(jx,"name");
const char *addr = jx_lookup_string(jx,"address");
const char *pref = jx_lookup_string(jx,"manager_preferred_connection");
struct jx *ifas = jx_lookup(jx,"network_interfaces");
int port = jx_lookup_integer(jx,"port");
if(last_addr) {
if(time(0) > idle_stoptime && strcmp(addr, last_addr->host) == 0 && port == last_addr->port) {
if(list_size(managers_list) < 2) {
free(last_addr);
last_addr = NULL;
/* convert idle_stoptime into connect_stoptime (e.g., time already served). */
connect_stoptime = idle_stoptime;
debug(D_WQ,"Previous idle disconnection from only manager available project=%s name=%s addr=%s port=%d",project,name,addr,port);
return 0;
} else {
list_push_tail(managers_list,list_pop_head(managers_list));
continue;
}
}
}
int result;
if(pref && strcmp(pref, "by_hostname") == 0) {
debug(D_WQ,"selected manager with project=%s name=%s addr=%s port=%d",project,name,addr,port);
result = serve_manager_by_hostport(name,port,project);
} else {
manager_addresses = interfaces_to_list(addr, port, ifas);
result = serve_manager_by_hostport_list(manager_addresses);
struct manager_address *m;
while((m = list_pop_head(manager_addresses))) {
free(m);
}
list_delete(manager_addresses);
manager_addresses = NULL;
}
if(result) {
free(last_addr);
last_addr = calloc(1,sizeof(*last_addr));
strncpy(last_addr->host, addr, DOMAIN_NAME_MAX - 1);
last_addr->port = port;
}
return result;
}
}
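/*
Generate a (probabilistically) unique worker id by hashing the pid,
ppid, and a random value.
*/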
void set_worker_id() {
srand(time(NULL));
char *salt_and_pepper = string_format("%d%d%d", getpid(), getppid(), rand());
unsigned char digest[MD5_DIGEST_LENGTH];
md5_buffer(salt_and_pepper, strlen(salt_and_pepper), digest);
worker_id = string_format("worker-%s", md5_string(digest));
free(salt_and_pepper);
}
static void handle_abort(int sig)
{
abort_flag = 1;
abort_signal_received = sig;
}
static void handle_sigchld(int sig)
{
sigchld_received_flag = 1;
}
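/*
Read a resource limit from an environment variable, then unset it so
that child tasks do not inherit the worker-level value.
*/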
static void read_resources_env_var(const char *name, int64_t *manual_option) {
char *value;
value = getenv(name);
if(value) {
*manual_option = atoi(value);
/* unset the variable so that child tasks cannot read the global value */
unsetenv(name);
}
}
static void read_resources_env_vars() {
read_resources_env_var("CORES", &manual_cores_option);
read_resources_env_var("MEMORY", &manual_memory_option);
read_resources_env_var("DISK", &manual_disk_option);
read_resources_env_var("GPUS", &manual_gpus_option);
}
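/*
Parse a manager specification of the form "host[:port][;host[:port];...]"
into a list of manager_address structures, using default_port when no
port is given.
*/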
struct list *parse_manager_addresses(const char *specs, int default_port) {
struct list *managers = list_create();
char *managers_args = xxstrdup(specs);
char *next_manager = strtok(managers_args, ";");
while(next_manager) {
int port = default_port;
char *port_str = strchr(next_manager, ':');
if(port_str) {
char *no_ipv4 = strchr(port_str+1, ':'); /* if another ':', then this is not ipv4. */
if(!no_ipv4) {
*port_str = '\0';
port = atoi(port_str+1);
}
}
if(port < 1) {
fatal("Invalid port for manager '%s'", next_manager);
}
struct manager_address *m = calloc(1, sizeof(*m));
strncpy(m->host, next_manager, LINK_ADDRESS_MAX);
m->port = port;
if(port_str) {
*port_str = ':';
}
list_push_tail(managers, m);
next_manager = strtok(NULL, ";");
}
free(managers_args);
return(managers);
}
static void show_help(const char *cmd)
{
printf( "Use: %s [options] <managerhost> <port> \n"
"or\n %s [options] \"managerhost:port[;managerhost:port;managerhost:port;...]\"\n"
"or\n %s [options] -M projectname\n",
cmd, cmd, cmd);
printf( "where options are:\n");
printf( " %-30s Show version string\n", "-v,--version");
printf( " %-30s Show this help screen\n", "-h,--help");
printf( " %-30s Name of manager (project) to contact. May be a regular expression.\n", "-N,-M,--manager-name=<name>");
printf( " %-30s Catalog server to query for managers. (default: %s:%d) \n", "-C,--catalog=<host:port>",CATALOG_HOST,CATALOG_PORT);
printf( " %-30s Enable debugging for this subsystem.\n", "-d,--debug=<subsystem>");
printf( " %-30s Send debugging to this file. (can also be :stderr, or :stdout)\n", "-o,--debug-file=<file>");
printf( " %-30s Set the maximum size of the debug log (default 10M, 0 disables).\n", "--debug-rotate-max=<bytes>");
printf( " %-30s Set worker to run as a foreman.\n", "--foreman");
printf( " %-30s Run as a foreman, and advertise to the catalog server with <name>.\n", "-f,--foreman-name=<name>");
printf( " %-30s\n", "--foreman-port=<port>[:<highport>]");
printf( " %-30s Set the port for the foreman to listen on. If <highport> is specified\n", "");
printf( " %-30s the port is chosen from the range port:highport. Implies --foreman.\n", "");
printf( " %-30s Select port to listen to at random and write to this file. Implies --foreman.\n", "-Z,--foreman-port-file=<file>");
printf( " %-30s Set the fast abort multiplier for foreman (default=disabled).\n", "-F,--fast-abort=<mult>");
printf( " %-30s Send statistics about foreman to this file.\n", "--specify-log=<logfile>");
printf( " %-30s Password file for authenticating to the manager.\n", "-P,--password=<pwfile>");
printf( " %-30s Set both --idle-timeout and --connect-timeout.\n", "-t,--timeout=<time>");
printf( " %-30s Disconnect after this time if manager sends no work. (default=%ds)\n", " --idle-timeout=<time>", idle_timeout);
printf( " %-30s Abort after this time if no managers are available. (default=%ds)\n", " --connect-timeout=<time>", idle_timeout);
printf( " %-30s Exit if parent process dies.\n", "--parent-death");
printf( " %-30s Set TCP window size.\n", "-w,--tcp-window-size=<size>");
printf( " %-30s Set initial value for backoff interval when worker fails to connect\n", "-i,--min-backoff=<time>");
printf( " %-30s to a manager. (default=%ds)\n", "", init_backoff_interval);
printf( " %-30s Set maximum value for backoff interval when worker fails to connect\n", "-b,--max-backoff=<time>");
printf( " %-30s to a manager. (default=%ds)\n", "", max_backoff_interval);
printf( " %-30s Set architecture string for the worker to report to manager instead\n", "-A,--arch=<arch>");
printf( " %-30s of the value in uname (%s).\n", "", arch_name);
printf( " %-30s Set operating system string for the worker to report to manager instead\n", "-O,--os=<os>");
printf( " %-30s of the value in uname (%s).\n", "", os_name);
printf( " %-30s Set the location for creating the working directory of the worker.\n", "-s,--workdir=<path>");
printf( " %-30s Set the maximum bandwidth the foreman will consume in bytes per second. Example: 100M for 100MBps. (default=unlimited)\n", "--bandwidth=<Bps>");
printf( " %-30s Set the number of cores reported by this worker. Set to 0 to have the\n", "--cores=<n>");
printf( " %-30s worker automatically measure. (default=%"PRId64")\n", "", manual_cores_option);
printf( " %-30s Set the number of GPUs reported by this worker. (default=0)\n", "--gpus=<n>");
printf( " %-30s Manually set the amount of memory (in MB) reported by this worker.\n", "--memory=<mb> ");
printf( " %-30s Manually set the amount of disk (in MB) reported by this worker.\n", "--disk=<mb>");
printf( " %-30s Use loop devices for task sandboxes (default=disabled, requires root access).\n", "--disk-allocation");
printf( " %-30s Specifies a user-defined feature the worker provides. May be specified several times.\n", "--feature");
printf( " %-30s Set the maximum number of seconds the worker may be active. (in s).\n", "--wall-time=<s>");
printf( " %-30s Forbid the use of symlinks for cache management.\n", "--disable-symlinks");
printf(" %-30s Single-shot mode -- quit immediately after disconnection.\n", "--single-shot");
printf(" %-30s docker mode -- run each task with a container based on this docker image.\n", "--docker=<image>");
printf(" %-30s docker-preserve mode -- tasks execute by a worker share a container based on this docker image.\n", "--docker-preserve=<image>");
printf(" %-30s docker-tar mode -- build docker image from tarball, this mode must be used with --docker or --docker-preserve.\n", "--docker-tar=<tarball>");
printf( " %-30s Set the percent chance per minute that the worker will shut down (simulates worker failures, for testing only).\n", "--volatility=<chance>");
printf( " %-30s Set the port used to lookup the worker's TLQ URL (-d and -o options also required).\n", "--tlq=<port>");
}
enum {LONG_OPT_DEBUG_FILESIZE = 256, LONG_OPT_VOLATILITY, LONG_OPT_BANDWIDTH,
LONG_OPT_DEBUG_RELEASE, LONG_OPT_SPECIFY_LOG, LONG_OPT_CORES, LONG_OPT_MEMORY,
LONG_OPT_DISK, LONG_OPT_GPUS, LONG_OPT_FOREMAN, LONG_OPT_FOREMAN_PORT, LONG_OPT_DISABLE_SYMLINKS,
LONG_OPT_IDLE_TIMEOUT, LONG_OPT_CONNECT_TIMEOUT, LONG_OPT_RUN_DOCKER, LONG_OPT_RUN_DOCKER_PRESERVE,
LONG_OPT_BUILD_FROM_TAR, LONG_OPT_SINGLE_SHOT, LONG_OPT_WALL_TIME, LONG_OPT_DISK_ALLOCATION,
LONG_OPT_MEMORY_THRESHOLD, LONG_OPT_FEATURE, LONG_OPT_TLQ, LONG_OPT_PARENT_DEATH};
static const struct option long_options[] = {
{"advertise", no_argument, 0, 'a'},
{"catalog", required_argument, 0, 'C'},
{"debug", required_argument, 0, 'd'},
{"debug-file", required_argument, 0, 'o'},
{"debug-rotate-max", required_argument, 0, LONG_OPT_DEBUG_FILESIZE},
{"disk-allocation", no_argument, 0, LONG_OPT_DISK_ALLOCATION},
{"foreman", no_argument, 0, LONG_OPT_FOREMAN},
{"foreman-port", required_argument, 0, LONG_OPT_FOREMAN_PORT},
{"foreman-port-file", required_argument, 0, 'Z'},
{"foreman-name", required_argument, 0, 'f'},
{"measure-capacity", no_argument, 0, 'c'},
{"fast-abort", required_argument, 0, 'F'},
{"specify-log", required_argument, 0, LONG_OPT_SPECIFY_LOG},
{"manager-name", required_argument, 0, 'M'},
{"master-name", required_argument, 0, 'M'},
{"password", required_argument, 0, 'P'},
{"timeout", required_argument, 0, 't'},
{"idle-timeout", required_argument, 0, LONG_OPT_IDLE_TIMEOUT},
{"connect-timeout", required_argument, 0, LONG_OPT_CONNECT_TIMEOUT},
{"tcp-window-size", required_argument, 0, 'w'},
{"min-backoff", required_argument, 0, 'i'},
{"max-backoff", required_argument, 0, 'b'},
{"single-shot", no_argument, 0, LONG_OPT_SINGLE_SHOT },
{"disable-symlinks", no_argument, 0, LONG_OPT_DISABLE_SYMLINKS},
{"disk-threshold", required_argument, 0, 'z'},
{"memory-threshold", required_argument, 0, LONG_OPT_MEMORY_THRESHOLD},
{"arch", required_argument, 0, 'A'},
{"os", required_argument, 0, 'O'},
{"workdir", required_argument, 0, 's'},
{"volatility", required_argument, 0, LONG_OPT_VOLATILITY},
{"bandwidth", required_argument, 0, LONG_OPT_BANDWIDTH},
{"cores", required_argument, 0, LONG_OPT_CORES},
{"memory", required_argument, 0, LONG_OPT_MEMORY},
{"disk", required_argument, 0, LONG_OPT_DISK},
{"gpus", required_argument, 0, LONG_OPT_GPUS},
{"wall-time", required_argument, 0, LONG_OPT_WALL_TIME},
{"help", no_argument, 0, 'h'},
{"version", no_argument, 0, 'v'},
{"disable-symlinks", no_argument, 0, LONG_OPT_DISABLE_SYMLINKS},
{"docker", required_argument, 0, LONG_OPT_RUN_DOCKER},
{"docker-preserve", required_argument, 0, LONG_OPT_RUN_DOCKER_PRESERVE},
{"docker-tar", required_argument, 0, LONG_OPT_BUILD_FROM_TAR},
{"feature", required_argument, 0, LONG_OPT_FEATURE},
{"tlq", required_argument, 0, LONG_OPT_TLQ},
{"parent-death", no_argument, 0, LONG_OPT_PARENT_DEATH},
{0,0,0,0}
};
int main(int argc, char *argv[])
{
int c;
int w;
int foreman_port = -1;
char * foreman_name = NULL;
char * port_file = NULL;
struct utsname uname_data;
int enable_capacity = 1; // enabled by default
double fast_abort_multiplier = 0;
char *foreman_stats_filename = NULL;
catalog_hosts = CATALOG_HOST;
features = hash_table_create(4, 0);
worker_start_time = time(0);
set_worker_id();
//obtain the architecture and os on which worker is running.
uname(&uname_data);
os_name = xxstrdup(uname_data.sysname);
arch_name = xxstrdup(uname_data.machine);
worker_mode = WORKER_MODE_WORKER;
debug_config(argv[0]);
read_resources_env_vars();
while((c = getopt_long(argc, argv, "acC:d:f:F:t:o:p:M:N:P:w:i:b:z:A:O:s:vZ:h", long_options, 0)) != -1) {
switch (c) {
case 'a':
//Left here for backwards compatibility
break;
case 'C':
catalog_hosts = xxstrdup(optarg);
break;
case 'd':
debug_flags_set(optarg);
break;
case LONG_OPT_DEBUG_FILESIZE:
debug_config_file_size(MAX(0, string_metric_parse(optarg)));
break;
case 'f':
worker_mode = WORKER_MODE_FOREMAN;
foreman_name = xxstrdup(optarg);
break;
case LONG_OPT_FOREMAN_PORT:
{ char *low_port = optarg;
char *high_port= strchr(optarg, ':');
worker_mode = WORKER_MODE_FOREMAN;
if(high_port) {
*high_port = '\0';
high_port++;
} else {
foreman_port = atoi(low_port);
break;
}
setenv("WORK_QUEUE_LOW_PORT", low_port, 0);
setenv("WORK_QUEUE_HIGH_PORT", high_port, 0);
foreman_port = -1;
break;
}
case 'c':
// This option is deprecated. Capacity estimation is now on by default for the foreman.
enable_capacity = 1;
break;
case 'F':
fast_abort_multiplier = atof(optarg);
break;
case LONG_OPT_SPECIFY_LOG:
foreman_stats_filename = xxstrdup(optarg);
break;
case 't':
connect_timeout = idle_timeout = string_time_parse(optarg);
break;
case LONG_OPT_IDLE_TIMEOUT:
idle_timeout = string_time_parse(optarg);
break;
case LONG_OPT_CONNECT_TIMEOUT:
connect_timeout = string_time_parse(optarg);
break;
case 'o':
debug_path = xxstrdup(optarg);
debug_config_file(optarg);
break;
case LONG_OPT_FOREMAN:
worker_mode = WORKER_MODE_FOREMAN;
break;
case 'M':
case 'N':
project_regex = optarg;
break;
case 'p':
// ignore for backwards compatibility
break;
case 'w':
w = string_metric_parse(optarg);
link_window_set(w, w);
break;
case 'i':
init_backoff_interval = string_metric_parse(optarg);
break;
case 'b':
max_backoff_interval = string_metric_parse(optarg);
if (max_backoff_interval < init_backoff_interval) {
fprintf(stderr, "Maximum backoff interval provided must be greater than the initial backoff interval of %ds.\n", init_backoff_interval);
exit(1);
}
break;
case 'z':
/* deprecated */
break;
case LONG_OPT_MEMORY_THRESHOLD:
/* deprecated */
break;
case 'A':
free(arch_name); //free the arch string obtained from uname
arch_name = xxstrdup(optarg);
break;
case 'O':
free(os_name); //free the os string obtained from uname
os_name = xxstrdup(optarg);
break;
case 's':
{
char temp_abs_path[PATH_MAX];
path_absolute(optarg, temp_abs_path, 1);
user_specified_workdir = xxstrdup(temp_abs_path);
break;
}
case 'v':
cctools_version_print(stdout, argv[0]);
exit(EXIT_SUCCESS);
break;
case 'P':
if(copy_file_to_buffer(optarg, &password, NULL) < 0) {
fprintf(stderr,"work_queue_worker: couldn't load password from %s: %s\n",optarg,strerror(errno));
exit(EXIT_FAILURE);
}
break;
case 'Z':
port_file = xxstrdup(optarg);
worker_mode = WORKER_MODE_FOREMAN;
break;
case LONG_OPT_VOLATILITY:
worker_volatility = atof(optarg);
break;
case LONG_OPT_BANDWIDTH:
setenv("WORK_QUEUE_BANDWIDTH", optarg, 1);
break;
case LONG_OPT_DEBUG_RELEASE:
setenv("WORK_QUEUE_RESET_DEBUG_FILE", "yes", 1);
break;
case LONG_OPT_CORES:
if(!strncmp(optarg, "all", 3)) {
manual_cores_option = 0;
} else {
manual_cores_option = atoi(optarg);
}
break;
case LONG_OPT_MEMORY:
if(!strncmp(optarg, "all", 3)) {
manual_memory_option = 0;
} else {
manual_memory_option = atoll(optarg);
}
break;
case LONG_OPT_DISK:
if(!strncmp(optarg, "all", 3)) {
manual_disk_option = 0;
} else {
manual_disk_option = atoll(optarg);
}
break;
case LONG_OPT_GPUS:
if(!strncmp(optarg, "all", 3)) {
manual_gpus_option = 0;
} else {
manual_gpus_option = atoi(optarg);
}
break;
case LONG_OPT_WALL_TIME:
manual_wall_time_option = atoi(optarg);
break;
case LONG_OPT_DISABLE_SYMLINKS:
symlinks_enabled = 0;
break;
case LONG_OPT_SINGLE_SHOT:
single_shot_mode = 1;
break;
case 'h':
show_help(argv[0]);
return 0;
case LONG_OPT_RUN_DOCKER:
container_mode = CONTAINER_MODE_DOCKER;
img_name = xxstrdup(optarg);
break;
case LONG_OPT_RUN_DOCKER_PRESERVE:
container_mode = CONTAINER_MODE_DOCKER_PRESERVE;
img_name = xxstrdup(optarg);
break;
case LONG_OPT_BUILD_FROM_TAR:
load_from_tar = 1;
tar_fn = xxstrdup(optarg);
break;
case LONG_OPT_DISK_ALLOCATION:
{
char *abs_path_preloader = string_format("%s/lib/libforce_halt_enospc.so", INSTALL_PATH);
int preload_result;
char *curr_ld_preload = getenv("LD_PRELOAD");
if(curr_ld_preload && abs_path_preloader) {
char *new_ld_preload = string_format("%s:%s", curr_ld_preload, abs_path_preloader);
preload_result = setenv("LD_PRELOAD", new_ld_preload, 1);
free(new_ld_preload);
}
else if(abs_path_preloader) {
preload_result = setenv("LD_PRELOAD", abs_path_preloader, 1);
}
else {
preload_result = 1;
}
free(abs_path_preloader);
if(preload_result) {
timestamp_t preload_fail_time = timestamp_get();
debug(D_WQ|D_NOTICE, "i/o dynamic library linking via LD_PRELOAD for loop device failed at: %"PRId64"", preload_fail_time);
}
disk_allocation = 1;
break;
}
case LONG_OPT_FEATURE:
hash_table_insert(features, optarg, (void **) 1);
break;
case LONG_OPT_TLQ:
tlq_port = atoi(optarg);
break;
case LONG_OPT_PARENT_DEATH:
initial_ppid = getppid();
break;
default:
show_help(argv[0]);
return 1;
}
}
cctools_version_debug(D_DEBUG, argv[0]);
// for backwards compatibility with the old syntax for specifying a worker's project name
if(worker_mode != WORKER_MODE_FOREMAN && foreman_name) {
if(foreman_name) {
project_regex = foreman_name;
}
}
//check that the foreman's name is distinct from the manager project it connects to
if(worker_mode == WORKER_MODE_FOREMAN && foreman_name){
if(project_regex && strcmp(foreman_name,project_regex) == 0) {
fatal("Foreman (%s) and Master (%s) share a name. Ensure that these are unique.\n",foreman_name,project_regex);
}
}
if(!project_regex) {
if((argc - optind) < 1 || (argc - optind) > 2) {
show_help(argv[0]);
exit(1);
}
int default_manager_port = (argc - optind) == 2 ? atoi(argv[optind+1]) : 0;
manager_addresses = parse_manager_addresses(argv[optind], default_manager_port);
if(list_size(manager_addresses) < 1) {
show_help(argv[0]);
fatal("No manager has been specified");
}
}
//Check GPU name
char *gpu_name = gpu_name_get();
if(gpu_name) {
hash_table_insert(features, gpu_name, (void **) 1);
}
signal(SIGTERM, handle_abort);
signal(SIGQUIT, handle_abort);
signal(SIGINT, handle_abort);
//Also do cleanup on SIGUSR1 & SIGUSR2 to allow using -notify and -l s_rt= options if submitting
//this worker process with SGE qsub. Otherwise task processes are left running when SGE
//terminates this process with SIGKILL.
signal(SIGUSR1, handle_abort);
signal(SIGUSR2, handle_abort);
signal(SIGCHLD, handle_sigchld);
random_init();
if(!workspace_create()) {
fprintf(stderr, "work_queue_worker: failed to setup workspace at %s.\n", workspace);
exit(1);
}
if(!workspace_check()) {
return 1;
}
// set $WORK_QUEUE_SANDBOX to workspace.
debug(D_WQ, "WORK_QUEUE_SANDBOX set to %s.\n", workspace);
setenv("WORK_QUEUE_SANDBOX", workspace, 0);
//get absolute pathnames of port and log file.
char temp_abs_path[PATH_MAX];
if(port_file)
{
path_absolute(port_file, temp_abs_path, 0);
free(port_file);
port_file = xxstrdup(temp_abs_path);
}
if(foreman_stats_filename)
{
path_absolute(foreman_stats_filename, temp_abs_path, 0);
free(foreman_stats_filename);
foreman_stats_filename = xxstrdup(temp_abs_path);
}
// change to workspace
chdir(workspace);
if(worker_mode == WORKER_MODE_FOREMAN) {
char foreman_string[WORK_QUEUE_LINE_MAX];
free(os_name); //free the os string obtained from uname
os_name = xxstrdup("foreman");
string_nformat(foreman_string, sizeof(foreman_string), "%s-foreman", argv[0]);
debug_config(foreman_string);
foreman_q = work_queue_create(foreman_port);
if(!foreman_q) {
fprintf(stderr, "work_queue_worker-foreman: failed to create foreman queue. Terminating.\n");
exit(1);
}
printf( "work_queue_worker-foreman: listening on port %d\n", work_queue_port(foreman_q));
if(port_file)
{ opts_write_port_file(port_file, work_queue_port(foreman_q)); }
if(foreman_name) {
work_queue_specify_name(foreman_q, foreman_name);
work_queue_specify_manager_mode(foreman_q, WORK_QUEUE_MANAGER_MODE_CATALOG);
}
if(password) {
work_queue_specify_password(foreman_q,password);
}
work_queue_specify_estimate_capacity_on(foreman_q, enable_capacity);
work_queue_activate_fast_abort(foreman_q, fast_abort_multiplier);
work_queue_specify_category_mode(foreman_q, NULL, WORK_QUEUE_ALLOCATION_MODE_FIXED);
if(foreman_stats_filename) {
work_queue_specify_log(foreman_q, foreman_stats_filename);
}
}
if(container_mode == CONTAINER_MODE_DOCKER && load_from_tar == 1) {
char load_cmd[1024];
string_nformat(load_cmd, sizeof(load_cmd), "docker load < %s", tar_fn);
system(load_cmd);
}
if(container_mode == CONTAINER_MODE_DOCKER_PRESERVE) {
if (load_from_tar == 1) {
char load_cmd[1024];
string_nformat(load_cmd, sizeof(load_cmd), "docker load < %s", tar_fn);
system(load_cmd);
}
string_nformat(container_name, sizeof(container_name), "worker-%d-%d", (int) getuid(), (int) getpid());
char container_mnt_point[1024];
char start_container_cmd[1024];
string_nformat(container_mnt_point, sizeof(container_mnt_point), "%s:%s", workspace, DOCKER_WORK_DIR);
string_nformat(start_container_cmd, sizeof(start_container_cmd), "docker run -i -d --name=\"%s\" -v %s -w %s %s", container_name, container_mnt_point, DOCKER_WORK_DIR, img_name);
system(start_container_cmd);
}
procs_running = itable_create(0);
procs_table = itable_create(0);
procs_waiting = list_create();
procs_complete = itable_create(0);
watcher = work_queue_watcher_create();
local_resources = work_queue_resources_create();
total_resources = work_queue_resources_create();
total_resources_last = work_queue_resources_create();
if(manual_cores_option < 1) {
manual_cores_option = load_average_get_cpus();
}
int backoff_interval = init_backoff_interval;
connect_stoptime = time(0) + connect_timeout;
measure_worker_resources();
printf("work_queue_worker: using %"PRId64 " cores, %"PRId64 " MB memory, %"PRId64 " MB disk, %"PRId64 " gpus\n",
total_resources->cores.total,
total_resources->memory.total,
total_resources->disk.total,
total_resources->gpus.total);
while(1) {
int result = 0;
if (initial_ppid != 0 && getppid() != initial_ppid) {
debug(D_NOTICE, "parent process exited, shutting down\n");
break;
}
measure_worker_resources();
if(!enforce_worker_promises(NULL)) {
abort_flag = 1;
break;
}
if(project_regex) {
result = serve_manager_by_name(catalog_hosts, project_regex);
} else {
result = serve_manager_by_hostport_list(manager_addresses);
}
/*
If the last attempt was a successful connection, then reset the backoff_interval,
and the connect timeout, then try again if a project name was given.
If the connect attempt failed, then slow down the retries.
*/
if(result) {
if(single_shot_mode) {
debug(D_DEBUG,"stopping: single shot mode");
break;
}
backoff_interval = init_backoff_interval;
connect_stoptime = time(0) + connect_timeout;
if(!project_regex && (time(0)>idle_stoptime)) {
debug(D_NOTICE,"stopping: no other managers available");
break;
}
} else {
backoff_interval = MIN(backoff_interval*2,max_backoff_interval);
}
if(abort_flag) {
debug(D_NOTICE,"stopping: abort signal received");
break;
}
if(time(0)>connect_stoptime) {
debug(D_NOTICE,"stopping: could not connect after %d seconds.",connect_timeout);
break;
}
sleep(backoff_interval);
}
if(container_mode == CONTAINER_MODE_DOCKER_PRESERVE || container_mode == CONTAINER_MODE_DOCKER) {
char stop_container_cmd[WORK_QUEUE_LINE_MAX];
char rm_container_cmd[WORK_QUEUE_LINE_MAX];
string_nformat(stop_container_cmd, sizeof(stop_container_cmd), "docker stop %s", container_name);
string_nformat(rm_container_cmd, sizeof(rm_container_cmd), "docker rm %s", container_name);
if(container_mode == CONTAINER_MODE_DOCKER_PRESERVE) {
//1. stop the container
system(stop_container_cmd);
//2. remove the container
system(rm_container_cmd);
}
}
workspace_delete();
return 0;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 15,230 | If following above, this would be: end_time = time(0) + manual_wall_time_option, which is simpler. Also, make the check manual_wall_time_option > 0, otherwise negative times would terminate the worker right away. | cooperative-computing-lab-cctools | c |
@@ -116,6 +116,10 @@ func (err wrappedFatalError) Cause() error {
return err.error
}
+func (err wrappedFatalError) Unwrap() error {
+ return err.error
+}
+
// IsFatalError returns true if err conforms to the Fatal interface
// and calling the Fatal method returns true.
func IsFatalError(err error) (isFatal bool) { | 1 | // Package fserrors provides errors and error handling
package fserrors
import (
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/rclone/rclone/lib/errors"
)
// Retrier is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retrier interface {
error
Retry() bool
}
// retryError is a type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retrier = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
// wrappedRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedRetryError struct {
error
}
// Retry interface
func (err wrappedRetryError) Retry() bool {
return true
}
// Check interface
var _ Retrier = wrappedRetryError{error(nil)}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
if err == nil {
err = errors.New("needs retry")
}
return wrappedRetryError{err}
}
func (err wrappedRetryError) Cause() error {
return err.error
}
// IsRetryError returns true if err conforms to the Retry interface
// and calling the Retry method returns true.
func IsRetryError(err error) (isRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(Retrier); ok {
isRetry = r.Retry()
return true
}
return false
})
return
}
// Fataler is an optional interface for error as to whether the
// operation should cause the entire operation to finish immediately.
//
// This should be returned from Update or Put methods as required
type Fataler interface {
error
Fatal() bool
}
// wrappedFatalError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedFatalError struct {
error
}
// Fatal interface
func (err wrappedFatalError) Fatal() bool {
return true
}
// Check interface
var _ Fataler = wrappedFatalError{error(nil)}
// FatalError makes an error which indicates it is a fatal error and
// the sync should stop.
func FatalError(err error) error {
if err == nil {
err = errors.New("fatal error")
}
return wrappedFatalError{err}
}
func (err wrappedFatalError) Cause() error {
return err.error
}
// IsFatalError returns true if err conforms to the Fatal interface
// and calling the Fatal method returns true.
func IsFatalError(err error) (isFatal bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(Fataler); ok {
isFatal = r.Fatal()
return true
}
return false
})
return
}
// NoRetrier is an optional interface for error as to whether the
// operation should not be retried at a high level.
//
// If only NoRetry errors are returned in a sync then the sync won't
// be retried.
//
// This should be returned from Update or Put methods as required
type NoRetrier interface {
error
NoRetry() bool
}
// wrappedNoRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedNoRetryError struct {
error
}
// NoRetry interface
func (err wrappedNoRetryError) NoRetry() bool {
return true
}
// Check interface
var _ NoRetrier = wrappedNoRetryError{error(nil)}
// NoRetryError makes an error which indicates the sync shouldn't be
// retried.
func NoRetryError(err error) error {
return wrappedNoRetryError{err}
}
func (err wrappedNoRetryError) Cause() error {
return err.error
}
// IsNoRetryError returns true if err conforms to the NoRetry
// interface and calling the NoRetry method returns true.
func IsNoRetryError(err error) (isNoRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(NoRetrier); ok {
isNoRetry = r.NoRetry()
return true
}
return false
})
return
}
// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
error
NoLowLevelRetry() bool
}
// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
error
}
// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
// Check interface
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
// Cause returns the underlying error
func (err wrappedNoLowLevelRetryError) Cause() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
}
return false
})
return
}
// RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay
//
// This should be returned from Update or Put methods as required and
// will cause the entire sync to be retried after a delay.
type RetryAfter interface {
error
RetryAfter() time.Time
}
// ErrorRetryAfter is an error which expresses a time that should be
// waited for until trying again
type ErrorRetryAfter time.Time
// NewErrorRetryAfter returns an ErrorRetryAfter with the given
// duration as an endpoint
func NewErrorRetryAfter(d time.Duration) ErrorRetryAfter {
return ErrorRetryAfter(time.Now().Add(d))
}
// Error returns the textual version of the error
func (e ErrorRetryAfter) Error() string {
return fmt.Sprintf("try again after %v (%v)", time.Time(e).Format(time.RFC3339Nano), time.Time(e).Sub(time.Now()))
}
// RetryAfter returns the time the operation should be retried at or
// after
func (e ErrorRetryAfter) RetryAfter() time.Time {
return time.Time(e)
}
// Check interface
var _ RetryAfter = ErrorRetryAfter{}
// RetryAfterErrorTime returns the time that the RetryAfter error
// indicates or a Zero time.Time
func RetryAfterErrorTime(err error) (retryAfter time.Time) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(RetryAfter); ok {
retryAfter = r.RetryAfter()
return true
}
return false
})
return
}
// IsRetryAfterError returns true if err is an ErrorRetryAfter
func IsRetryAfterError(err error) bool {
return !RetryAfterErrorTime(err).IsZero()
}
// CountableError is an optional interface for error. It stores a boolean
// which signifies if the error has already been counted or not
type CountableError interface {
error
Count()
IsCounted() bool
}
// wrappedFatalError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedCountableError struct {
error
isCounted bool
}
// CountableError interface
func (err *wrappedCountableError) Count() {
err.isCounted = true
}
// CountableError interface
func (err *wrappedCountableError) IsCounted() bool {
return err.isCounted
}
func (err *wrappedCountableError) Cause() error {
return err.error
}
// IsCounted returns true if err conforms to the CountableError interface
// and has already been counted
func IsCounted(err error) bool {
if r, ok := err.(CountableError); ok {
return r.IsCounted()
}
return false
}
// Count sets the isCounted variable on the error if it conforms to the
// CountableError interface
func Count(err error) {
if r, ok := err.(CountableError); ok {
r.Count()
}
}
// Check interface
var _ CountableError = &wrappedCountableError{error: error(nil)}
// FsError makes an error which can keep a record that it is already counted
// or not
func FsError(err error) error {
if err == nil {
err = errors.New("countable error")
}
return &wrappedCountableError{error: err}
}
// Cause is a souped up errors.Cause which can unwrap some standard
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
func Cause(cause error) (retriable bool, err error) {
errors.Walk(cause, func(c error) bool {
// Check for net error Timeout()
if x, ok := c.(interface {
Timeout() bool
}); ok && x.Timeout() {
retriable = true
}
// Check for net error Temporary()
if x, ok := c.(interface {
Temporary() bool
}); ok && x.Temporary() {
retriable = true
}
err = c
return false
})
return
}
// retriableErrorStrings is a list of phrases which when we find it
// in an an error, we know it is a networking error which should be
// retried.
//
// This is incredibly ugly - if only errors.Cause worked for all
// errors and all errors were exported from the stdlib.
var retriableErrorStrings = []string{
"use of closed network connection", // internal/poll/fd.go
"unexpected EOF reading trailer", // net/http/transfer.go
"transport connection broken", // net/http/transport.go
"http: ContentLength=", // net/http/transfer.go
"server closed idle connection", // net/http/transport.go
"bad record MAC", // crypto/tls/alert.go
"stream error:", // src/net/http/h2_bundle.go
}
// Errors which indicate networking errors which should be retried
//
// These are added to in retriable_errors*.go
var retriableErrors = []error{
io.EOF,
io.ErrUnexpectedEOF,
}
// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() or if the error
// indicates a premature closing of the connection.
func ShouldRetry(err error) bool {
if err == nil {
return false
}
// If error has been marked to NoLowLevelRetry then don't retry
if IsNoLowLevelRetryError(err) {
return false
}
// Find root cause if available
retriable, err := Cause(err)
if retriable {
return true
}
// Check if it is a retriable error
for _, retriableErr := range retriableErrors {
if err == retriableErr {
return true
}
}
// Check error strings (yuch!) too
errString := err.Error()
for _, phrase := range retriableErrorStrings {
if strings.Contains(errString, phrase) {
return true
}
}
return false
}
// ShouldRetryHTTP returns a boolean as to whether this resp deserves.
// It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
type causer interface {
Cause() error
}
var (
_ causer = wrappedRetryError{}
_ causer = wrappedFatalError{}
_ causer = wrappedNoRetryError{}
)
| 1 | 10,640 | If we aren't using `errors.Is` we don't need the `Unwrap` as we have `Cause` which is what pkg/errors uses. | rclone-rclone | go |
@@ -4,8 +4,9 @@ import (
"io/ioutil"
"os"
+ "k8s.io/klog"
+
"github.com/kubeedge/beehive/pkg/common/config"
- "github.com/kubeedge/beehive/pkg/common/log"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/channelq" | 1 | package cloudhub
import (
"io/ioutil"
"os"
"github.com/kubeedge/beehive/pkg/common/config"
"github.com/kubeedge/beehive/pkg/common/log"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/channelq"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/util"
chconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/servers"
)
type cloudHub struct {
context *context.Context
stopChan chan bool
}
func Register() {
core.Register(&cloudHub{})
}
func (a *cloudHub) Name() string {
return "cloudhub"
}
func (a *cloudHub) Group() string {
return "cloudhub"
}
func (a *cloudHub) Start(c *context.Context) {
a.context = c
a.stopChan = make(chan bool)
initHubConfig()
eventq := channelq.NewChannelEventQueue(c)
// start dispatch message from the cloud to edge node
go eventq.DispatchMessage()
// start the cloudhub server
if util.HubConfig.ProtocolWebsocket {
go servers.StartCloudHub(servers.ProtocolWebsocket, eventq, c)
}
if util.HubConfig.ProtocolQuic {
go servers.StartCloudHub(servers.ProtocolQuic, eventq, c)
}
<-a.stopChan
}
func (a *cloudHub) Cleanup() {
a.stopChan <- true
a.context.Cleanup(a.Name())
}
func initHubConfig() {
cafile, err := config.CONFIG.GetValue("cloudhub.ca").ToString()
if err != nil {
log.LOGGER.Info("missing cloudhub.ca configuration key, loading default path and filename ./" + chconfig.DefaultCAFile)
cafile = chconfig.DefaultCAFile
}
certfile, err := config.CONFIG.GetValue("cloudhub.cert").ToString()
if err != nil {
log.LOGGER.Info("missing cloudhub.cert configuration key, loading default path and filename ./" + chconfig.DefaultCertFile)
certfile = chconfig.DefaultCertFile
}
keyfile, err := config.CONFIG.GetValue("cloudhub.key").ToString()
if err != nil {
log.LOGGER.Info("missing cloudhub.key configuration key, loading default path and filename ./" + chconfig.DefaultKeyFile)
keyfile = chconfig.DefaultKeyFile
}
errs := make([]string, 0)
util.HubConfig = &util.Config{}
util.HubConfig.ProtocolWebsocket, _ = config.CONFIG.GetValue("cloudhub.protocol_websocket").ToBool()
util.HubConfig.ProtocolQuic, _ = config.CONFIG.GetValue("cloudhub.protocol_quic").ToBool()
if !util.HubConfig.ProtocolWebsocket && !util.HubConfig.ProtocolQuic {
util.HubConfig.ProtocolWebsocket = true
}
util.HubConfig.Address, _ = config.CONFIG.GetValue("cloudhub.address").ToString()
util.HubConfig.Port, _ = config.CONFIG.GetValue("cloudhub.port").ToInt()
util.HubConfig.QuicPort, _ = config.CONFIG.GetValue("cloudhub.quic_port").ToInt()
util.HubConfig.MaxIncomingStreams, _ = config.CONFIG.GetValue("cloudhub.max_incomingstreams").ToInt()
util.HubConfig.KeepaliveInterval, _ = config.CONFIG.GetValue("cloudhub.keepalive-interval").ToInt()
util.HubConfig.WriteTimeout, _ = config.CONFIG.GetValue("cloudhub.write-timeout").ToInt()
util.HubConfig.NodeLimit, _ = config.CONFIG.GetValue("cloudhub.node-limit").ToInt()
util.HubConfig.Ca, err = ioutil.ReadFile(cafile)
if err != nil {
errs = append(errs, err.Error())
}
util.HubConfig.Cert, err = ioutil.ReadFile(certfile)
if err != nil {
errs = append(errs, err.Error())
}
util.HubConfig.Key, err = ioutil.ReadFile(keyfile)
if err != nil {
errs = append(errs, err.Error())
}
if len(errs) > 0 {
log.LOGGER.Errorf("cloudhub failed with errors : %v", errs)
os.Exit(1)
}
}
| 1 | 13,468 | redundant empty line | kubeedge-kubeedge | go |
@@ -150,6 +150,7 @@ bool ConfigManager::load()
boolean[ONLINE_OFFLINE_CHARLIST] = getGlobalBoolean(L, "showOnlineStatusInCharlist", false);
boolean[YELL_ALLOW_PREMIUM] = getGlobalBoolean(L, "yellAlwaysAllowPremium", false);
boolean[FORCE_MONSTERTYPE_LOAD] = getGlobalBoolean(L, "forceMonsterTypesOnLoad", true);
+ boolean[HOUSE_OWNED_BY_ACCOUNT] = getGlobalBoolean(L, "houseOwnedByAccount", false);
string[DEFAULT_PRIORITY] = getGlobalString(L, "defaultPriority", "high");
string[SERVER_NAME] = getGlobalString(L, "serverName", ""); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#if __has_include("luajit/lua.hpp")
#include <luajit/lua.hpp>
#else
#include <lua.hpp>
#endif
#include "configmanager.h"
#include "game.h"
#if LUA_VERSION_NUM >= 502
#undef lua_strlen
#define lua_strlen lua_rawlen
#endif
extern Game g_game;
namespace {
std::string getGlobalString(lua_State* L, const char* identifier, const char* defaultValue)
{
lua_getglobal(L, identifier);
if (!lua_isstring(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
size_t len = lua_strlen(L, -1);
std::string ret(lua_tostring(L, -1), len);
lua_pop(L, 1);
return ret;
}
int32_t getGlobalNumber(lua_State* L, const char* identifier, const int32_t defaultValue = 0)
{
lua_getglobal(L, identifier);
if (!lua_isnumber(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
int32_t val = lua_tonumber(L, -1);
lua_pop(L, 1);
return val;
}
bool getGlobalBoolean(lua_State* L, const char* identifier, const bool defaultValue)
{
lua_getglobal(L, identifier);
if (!lua_isboolean(L, -1)) {
if (!lua_isstring(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
size_t len = lua_strlen(L, -1);
std::string ret(lua_tostring(L, -1), len);
lua_pop(L, 1);
return booleanString(ret);
}
int val = lua_toboolean(L, -1);
lua_pop(L, 1);
return val != 0;
}
}
bool ConfigManager::load()
{
lua_State* L = luaL_newstate();
if (!L) {
throw std::runtime_error("Failed to allocate memory");
}
luaL_openlibs(L);
if (luaL_dofile(L, "config.lua")) {
std::cout << "[Error - ConfigManager::load] " << lua_tostring(L, -1) << std::endl;
lua_close(L);
return false;
}
//parse config
if (!loaded) { //info that must be loaded one time (unless we reset the modules involved)
boolean[BIND_ONLY_GLOBAL_ADDRESS] = getGlobalBoolean(L, "bindOnlyGlobalAddress", false);
boolean[OPTIMIZE_DATABASE] = getGlobalBoolean(L, "startupDatabaseOptimization", true);
string[IP] = getGlobalString(L, "ip", "127.0.0.1");
string[MAP_NAME] = getGlobalString(L, "mapName", "forgotten");
string[MAP_AUTHOR] = getGlobalString(L, "mapAuthor", "Unknown");
string[HOUSE_RENT_PERIOD] = getGlobalString(L, "houseRentPeriod", "never");
string[MYSQL_HOST] = getGlobalString(L, "mysqlHost", "127.0.0.1");
string[MYSQL_USER] = getGlobalString(L, "mysqlUser", "forgottenserver");
string[MYSQL_PASS] = getGlobalString(L, "mysqlPass", "");
string[MYSQL_DB] = getGlobalString(L, "mysqlDatabase", "forgottenserver");
string[MYSQL_SOCK] = getGlobalString(L, "mysqlSock", "");
integer[SQL_PORT] = getGlobalNumber(L, "mysqlPort", 3306);
integer[GAME_PORT] = getGlobalNumber(L, "gameProtocolPort", 7172);
integer[LOGIN_PORT] = getGlobalNumber(L, "loginProtocolPort", 7171);
integer[STATUS_PORT] = getGlobalNumber(L, "statusProtocolPort", 7171);
integer[MARKET_OFFER_DURATION] = getGlobalNumber(L, "marketOfferDuration", 30 * 24 * 60 * 60);
}
boolean[ALLOW_CHANGEOUTFIT] = getGlobalBoolean(L, "allowChangeOutfit", true);
boolean[ONE_PLAYER_ON_ACCOUNT] = getGlobalBoolean(L, "onePlayerOnlinePerAccount", true);
boolean[AIMBOT_HOTKEY_ENABLED] = getGlobalBoolean(L, "hotkeyAimbotEnabled", true);
boolean[REMOVE_RUNE_CHARGES] = getGlobalBoolean(L, "removeChargesFromRunes", true);
boolean[REMOVE_WEAPON_AMMO] = getGlobalBoolean(L, "removeWeaponAmmunition", true);
boolean[REMOVE_WEAPON_CHARGES] = getGlobalBoolean(L, "removeWeaponCharges", true);
boolean[REMOVE_POTION_CHARGES] = getGlobalBoolean(L, "removeChargesFromPotions", true);
boolean[EXPERIENCE_FROM_PLAYERS] = getGlobalBoolean(L, "experienceByKillingPlayers", false);
boolean[FREE_PREMIUM] = getGlobalBoolean(L, "freePremium", false);
boolean[REPLACE_KICK_ON_LOGIN] = getGlobalBoolean(L, "replaceKickOnLogin", true);
boolean[ALLOW_CLONES] = getGlobalBoolean(L, "allowClones", false);
boolean[MARKET_PREMIUM] = getGlobalBoolean(L, "premiumToCreateMarketOffer", true);
boolean[EMOTE_SPELLS] = getGlobalBoolean(L, "emoteSpells", false);
boolean[STAMINA_SYSTEM] = getGlobalBoolean(L, "staminaSystem", true);
boolean[WARN_UNSAFE_SCRIPTS] = getGlobalBoolean(L, "warnUnsafeScripts", true);
boolean[CONVERT_UNSAFE_SCRIPTS] = getGlobalBoolean(L, "convertUnsafeScripts", true);
boolean[CLASSIC_EQUIPMENT_SLOTS] = getGlobalBoolean(L, "classicEquipmentSlots", false);
boolean[CLASSIC_ATTACK_SPEED] = getGlobalBoolean(L, "classicAttackSpeed", false);
boolean[SCRIPTS_CONSOLE_LOGS] = getGlobalBoolean(L, "showScriptsLogInConsole", true);
boolean[SERVER_SAVE_NOTIFY_MESSAGE] = getGlobalBoolean(L, "serverSaveNotifyMessage", true);
boolean[SERVER_SAVE_CLEAN_MAP] = getGlobalBoolean(L, "serverSaveCleanMap", false);
boolean[SERVER_SAVE_CLOSE] = getGlobalBoolean(L, "serverSaveClose", false);
boolean[SERVER_SAVE_SHUTDOWN] = getGlobalBoolean(L, "serverSaveShutdown", true);
boolean[ONLINE_OFFLINE_CHARLIST] = getGlobalBoolean(L, "showOnlineStatusInCharlist", false);
boolean[YELL_ALLOW_PREMIUM] = getGlobalBoolean(L, "yellAlwaysAllowPremium", false);
boolean[FORCE_MONSTERTYPE_LOAD] = getGlobalBoolean(L, "forceMonsterTypesOnLoad", true);
string[DEFAULT_PRIORITY] = getGlobalString(L, "defaultPriority", "high");
string[SERVER_NAME] = getGlobalString(L, "serverName", "");
string[OWNER_NAME] = getGlobalString(L, "ownerName", "");
string[OWNER_EMAIL] = getGlobalString(L, "ownerEmail", "");
string[URL] = getGlobalString(L, "url", "");
string[LOCATION] = getGlobalString(L, "location", "");
string[MOTD] = getGlobalString(L, "motd", "");
string[WORLD_TYPE] = getGlobalString(L, "worldType", "pvp");
integer[MAX_PLAYERS] = getGlobalNumber(L, "maxPlayers");
integer[PZ_LOCKED] = getGlobalNumber(L, "pzLocked", 60000);
integer[DEFAULT_DESPAWNRANGE] = getGlobalNumber(L, "deSpawnRange", 2);
integer[DEFAULT_DESPAWNRADIUS] = getGlobalNumber(L, "deSpawnRadius", 50);
integer[RATE_EXPERIENCE] = getGlobalNumber(L, "rateExp", 5);
integer[RATE_SKILL] = getGlobalNumber(L, "rateSkill", 3);
integer[RATE_LOOT] = getGlobalNumber(L, "rateLoot", 2);
integer[RATE_MAGIC] = getGlobalNumber(L, "rateMagic", 3);
integer[RATE_SPAWN] = getGlobalNumber(L, "rateSpawn", 1);
integer[HOUSE_PRICE] = getGlobalNumber(L, "housePriceEachSQM", 1000);
integer[KILLS_TO_RED] = getGlobalNumber(L, "killsToRedSkull", 3);
integer[KILLS_TO_BLACK] = getGlobalNumber(L, "killsToBlackSkull", 6);
integer[ACTIONS_DELAY_INTERVAL] = getGlobalNumber(L, "timeBetweenActions", 200);
integer[EX_ACTIONS_DELAY_INTERVAL] = getGlobalNumber(L, "timeBetweenExActions", 1000);
integer[MAX_MESSAGEBUFFER] = getGlobalNumber(L, "maxMessageBuffer", 4);
integer[KICK_AFTER_MINUTES] = getGlobalNumber(L, "kickIdlePlayerAfterMinutes", 15);
integer[PROTECTION_LEVEL] = getGlobalNumber(L, "protectionLevel", 1);
integer[DEATH_LOSE_PERCENT] = getGlobalNumber(L, "deathLosePercent", -1);
integer[STATUSQUERY_TIMEOUT] = getGlobalNumber(L, "statusTimeout", 5000);
integer[FRAG_TIME] = getGlobalNumber(L, "timeToDecreaseFrags", 24 * 60 * 60);
integer[WHITE_SKULL_TIME] = getGlobalNumber(L, "whiteSkullTime", 15 * 60);
integer[STAIRHOP_DELAY] = getGlobalNumber(L, "stairJumpExhaustion", 2000);
integer[EXP_FROM_PLAYERS_LEVEL_RANGE] = getGlobalNumber(L, "expFromPlayersLevelRange", 75);
integer[CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES] = getGlobalNumber(L, "checkExpiredMarketOffersEachMinutes", 60);
integer[MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER] = getGlobalNumber(L, "maxMarketOffersAtATimePerPlayer", 100);
integer[MAX_PACKETS_PER_SECOND] = getGlobalNumber(L, "maxPacketsPerSecond", 25);
integer[SERVER_SAVE_NOTIFY_DURATION] = getGlobalNumber(L, "serverSaveNotifyDuration", 5);
integer[YELL_MINIMUM_LEVEL] = getGlobalNumber(L, "yellMinimumLevel", 2);
loaded = true;
lua_close(L);
return true;
}
bool ConfigManager::reload()
{
bool result = load();
if (transformToSHA1(getString(ConfigManager::MOTD)) != g_game.getMotdHash()) {
g_game.incrementMotdNum();
}
return result;
}
static std::string dummyStr;
const std::string& ConfigManager::getString(string_config_t what) const
{
if (what >= LAST_STRING_CONFIG) {
std::cout << "[Warning - ConfigManager::getString] Accessing invalid index: " << what << std::endl;
return dummyStr;
}
return string[what];
}
int32_t ConfigManager::getNumber(integer_config_t what) const
{
if (what >= LAST_INTEGER_CONFIG) {
std::cout << "[Warning - ConfigManager::getNumber] Accessing invalid index: " << what << std::endl;
return 0;
}
return integer[what];
}
bool ConfigManager::getBoolean(boolean_config_t what) const
{
if (what >= LAST_BOOLEAN_CONFIG) {
std::cout << "[Warning - ConfigManager::getBoolean] Accessing invalid index: " << what << std::endl;
return false;
}
return boolean[what];
}
| 1 | 16,906 | The config.lua.dist still shows `houseAccountOwner` | otland-forgottenserver | cpp |
@@ -11,7 +11,6 @@
namespace Sonata\MediaBundle\Command;
-use Sonata\ClassificationBundle\Model\ContextInterface;
use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface; | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <thomas.rabaix@sonata-project.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Command;
use Sonata\ClassificationBundle\Model\ContextInterface;
use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
class FixMediaContextCommand extends ContainerAwareCommand
{
/**
* {@inheritdoc}
*/
public function configure()
{
$this->setName('sonata:media:fix-media-context');
$this->setDescription('Generate the default category for each media context');
}
/**
* {@inheritdoc}
*/
public function execute(InputInterface $input, OutputInterface $output)
{
$pool = $this->getContainer()->get('sonata.media.pool');
$contextManager = $this->getContainer()->get('sonata.classification.manager.context');
$cateoryManager = $this->getContainer()->get('sonata.classification.manager.category');
foreach ($pool->getContexts() as $context => $contextAttrs) {
/** @var ContextInterface $defaultContext */
$defaultContext = $contextManager->findOneBy(array(
'id' => $context,
));
if (!$defaultContext) {
$output->writeln(sprintf(" > default context for '%s' is missing, creating one", $context));
$defaultContext = $contextManager->create();
$defaultContext->setId($context);
$defaultContext->setName(ucfirst($context));
$defaultContext->setEnabled(true);
$contextManager->save($defaultContext);
}
$defaultCategory = $cateoryManager->getRootCategory($defaultContext);
if (!$defaultCategory) {
$output->writeln(sprintf(" > default category for '%s' is missing, creating one", $context));
$defaultCategory = $cateoryManager->create();
$defaultCategory->setContext($defaultContext);
$defaultCategory->setName(ucfirst($context));
$defaultCategory->setEnabled(true);
$defaultCategory->setPosition(0);
$cateoryManager->save($defaultCategory);
}
}
$output->writeln('Done!');
}
}
| 1 | 6,913 | I think we can leave this import and use non FQNs in the code. | sonata-project-SonataMediaBundle | php |
@@ -1654,9 +1654,10 @@ class TargetLocator {
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
+ let paramName = this.driver_.getExecutor().w3c ? 'handle' : 'name';
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
- setParameter('name', nameOrHandle),
+ setParameter(paramName, nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
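A brief hedged sketch of the call site this change affects (illustrative only; `driver` is assumed to be a session created elsewhere). Under the W3C dialect the remote end expects the window to be identified by a `handle` parameter, while the legacy JSON wire protocol expects `name`, so the client picks the key from the executor's `w3c` flag:

    // Switch to the most recently opened window; the same client call works
    // against both dialects, only the serialized parameter name differs.
    driver.getAllWindowHandles().then(handles => {
      return driver.switchTo().window(handles[handles.length - 1]);
    });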
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const Session = require('./session').Session;
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
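// Hedged usage sketch (not part of the original file): conditions can be built
// directly from the class above, e.g. waiting for the page title to contain
// some text. The function receives the driver passed to WebDriver#wait.
//
//   const titleContains = new Condition('for title to contain "foo"', d => {
//     return d.getTitle().then(title => title.indexOf('foo') !== -1);
//   });
//   // elsewhere: driver.wait(titleContains, 5000);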
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
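// Illustrative sketch of the conversion rules documented above (hypothetical
// values; this call is not made here). A nested structure holding a WebElement
// and a promise is flattened to plain JSON before being sent over the wire:
//
//   toWireValue({args: [someElement, Promise.resolve(42)], flag: true})
//       .then(wire => {
//         // `someElement` collapses to its serialized element-ID object and
//         // the promise to its resolved value:
//         // {args: [<element id object>, 42], flag: true}
//       });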
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
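// Hedged sketch of the inverse direction (identifiers are illustrative): any
// object in the response that carries a WebElement ID key is rehydrated into
// a WebElement bound to the given driver.
//
//   const decoded = fromWireValue(driver, [{'ELEMENT': 'abc123'}, 'plain string']);
//   // decoded[0] instanceof WebElement === true; decoded[1] === 'plain string'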
/**
* Creates a new WebDriver client, which provides control over a browser.
*
* Every command.Command returns a {@link promise.Promise} that
* represents the result of that command. Callbacks may be registered on this
* object to manipulate the command result or catch an expected error. Any
* commands scheduled with a callback are considered sub-commands and will
* execute before the next command in the current frame. For example:
*
* var message = [];
* driver.call(message.push, message, 'a').then(function() {
* driver.call(message.push, message, 'b');
* });
* driver.call(message.push, message, 'c');
* driver.call(function() {
* alert('message is abc? ' + (message.join('') == 'abc'));
* });
*
*/
class WebDriver {
/**
* @param {!(Session|promise.Promise<!Session>)} session Either a
* known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
*/
constructor(session, executor, opt_flow) {
/** @private {!promise.Promise<!Session>} */
this.session_ = promise.fulfilled(session);
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {input.FileDetector} */
this.fileDetector_ = null;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
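  // Hedged usage sketch for the factory above; the executor comes from the
  // remote/http machinery elsewhere in the library and the session id is
  // illustrative:
  //
  //   const driver = WebDriver.attachToSession(executor, 'existing-session-id');
  //   driver.getTitle().then(title => console.log(title));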
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
   *     // If the createSession operation failed, then this command will also
   *     // fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
return new WebDriver(session, executor, flow);
}
/**
* @return {!promise.ControlFlow} The control flow used by this
* instance.
*/
controlFlow() {
return this.flow_;
}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {
var self = this;
checkHasNotQuit();
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
    // attempts to execute the command. To ensure parameter errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(function() {
// A call to WebDriver.quit() may have been scheduled in the same event
// loop as this |command|, which would prevent us from detecting that the
// driver has quit above. Therefore, we need to make another quick check.
// We still check above so we can fail as early as possible.
checkHasNotQuit();
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(self, value));
}, description);
function checkHasNotQuit() {
if (!self.session_) {
throw new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be ' +
'used.');
}
}
}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {
return this.executor_;
}
/**
* @return {!promise.Promise<!Session>} A promise for this client's
* session.
*/
getSession() {
return this.session_;
}
/**
* @return {!promise.Promise<!Capabilities>} A promise
* that will resolve with the this instance's capabilities.
*/
getCapabilities() {
return this.session_.then(session => session.getCapabilities());
}
/**
* Schedules a command to quit the current session. After calling quit, this
* instance will be invalidated and may no longer be used to issue commands
* against the browser.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
    // to throw an error when attempting to use a driver post-quit.
return result.finally(() => delete this.session_);
}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {
return new actions.ActionSequence(this);
}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {
return new actions.TouchSequence(this);
}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
* functions return value:
*
* - For a HTML element, the value will resolve to a {@linkplain WebElement}
* - Null and undefined return values will resolve to null</li>
* - Booleans, numbers, and strings will resolve as is</li>
* - Functions will resolve to their string representation</li>
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
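  // A small hedged sketch of the rules above: script arguments may include
  // WebElements, and the script's return value comes back through the promise
  // (the locator and page are assumptions for illustration).
  //
  //   let link = driver.findElement({css: 'a'});
  //   driver.executeScript(
  //       'arguments[0].scrollIntoView(); return document.title;', link)
  //       .then(title => console.log('scrolled; title is', title));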
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
   * taken for resolving this function's return value against the first argument
* to the script's callback function:
*
   * - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('dog@example.com');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
        'WebDriver.executeAsyncScript()');
}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|promise.Promise<T>)} fn The function to
* execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
   * @return {!promise.Promise<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
let flow = this.flow_;
return flow.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
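  // Hedged sketch: `call` is useful for pushing plain asynchronous work into
  // the control flow so it runs in order with the WebDriver commands that were
  // scheduled around it.
  //
  //   driver.get('https://example.com');
  //   driver.call(() => console.log('navigation has finished'));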
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
* the condition has been satisified. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
   * element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(promise.Promise<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
   *     function, or rejected if the condition times out. If the input
   *     condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @template T
*/
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!promise.Promise} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Promise.<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
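  // Hedged sketch combining the two handle getters above: wait for a second
  // window to appear after clicking something that opens one (the locator is
  // an assumption for illustration).
  //
  //   driver.findElement({id: 'open-popup'}).click();
  //   driver.wait(() => {
  //     return driver.getAllWindowHandles().then(handles => handles.length > 1);
  //   }, 5000);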
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/**
* Schedules a command to close the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {
return this.navigate().to(url);
}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #isElementPresent} instead.
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
   * @return {!promise.Promise.<!WebElement>} A
   *     promise that will resolve to a WebElement.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/**
* Schedule a command to search for multiple elements on the page.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Promise.<!Array.<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
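  /*
   * A minimal usage sketch for findElements(): the CSS locator and the call to
   * getText() are illustrative assumptions, not part of this API's contract.
   *
   *     driver.findElements(By.css('a')).then(function(links) {
   *       links.forEach(function(link) {
   *         link.getText().then(console.log);
   *       });
   *     });
   */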
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Promise<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
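  /*
   * A minimal usage sketch: persisting the base-64 screenshot with Node's fs
   * module. The file name is an illustrative assumption.
   *
   *     driver.takeScreenshot().then(function(png) {
   *       require('fs').writeFileSync('screenshot.png', png, 'base64');
   *     });
   */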
/**
* @return {!Options} The options interface for this instance.
*/
manage() {
return new Options(this);
}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {
return new Navigation(this);
}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
/**
* Provides methods for managing browser and driver state.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
* driver.options().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
* driver.options().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
* driver.options().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
   * Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
*/
implicitlyWait(ms) {
return this._scheduleCommand(ms, 'implicit', 'implicitlyWait');
}
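  /*
   * A minimal usage sketch: enable a 5 second implicit wait before a lookup.
   * The timeout value and locator are illustrative assumptions.
   *
   *     driver.manage().timeouts().implicitlyWait(5000);
   *     driver.findElement(By.id('slow-widget'));  // polls for up to 5 seconds
   */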
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the script timeout has been set.
*/
setScriptTimeout(ms) {
return this._scheduleCommand(ms, 'script', 'setScriptTimeout');
}
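  /*
   * A minimal usage sketch pairing the script timeout with executeAsyncScript.
   * The 10 second value and the injected script are illustrative assumptions.
   *
   *     driver.manage().timeouts().setScriptTimeout(10000);
   *     driver.executeAsyncScript(function() {
   *       var done = arguments[arguments.length - 1];
   *       setTimeout(done, 500);
   *     });
   */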
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the timeout has been set.
*/
pageLoadTimeout(ms) {
return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout');
}
_scheduleCommand(ms, timeoutIdentifier, timeoutName) {
return this.driver_.schedule(
new command.Command(command.Name.SET_TIMEOUT).
setParameter('type', timeoutIdentifier).
setParameter('ms', ms),
`WebDriver.manage().timeouts().${timeoutName}(${ms})`);
}
}
/**
* An interface for managing the current window.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Promise.<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
   * @param {number} y The desired vertical position, relative to the top of
   *     the screen.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Promise<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
* @param {!logging.Type} type The desired log type.
* @return {!promise.Promise.<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
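  /*
   * A minimal usage sketch: draining the 'browser' log since the last call.
   * The log type is an illustrative assumption; availability varies by driver.
   *
   *     driver.manage().logs().get('browser').then(function(entries) {
   *       entries.forEach(function(entry) {
   *         console.log(entry.message);
   *       });
   *     });
   */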
/**
* Retrieves the log types available to this driver.
* @return {!promise.Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
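  /*
   * A minimal usage sketch: enter an iframe, then return to the top document.
   * The locator is an illustrative assumption.
   *
   *     var frame = driver.findElement(By.css('iframe'));
   *     driver.switchTo().frame(frame);
   *     // ... interact with content inside the frame ...
   *     driver.switchTo().defaultContent();
   */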
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
   * by their handle (as returned by {@link WebDriver#getWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
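  /*
   * A minimal usage sketch: switch to the most recently opened window handle.
   * Assumes getAllWindowHandles() lists handles in opening order, which is an
   * illustrative simplification.
   *
   *     driver.getAllWindowHandles().then(function(handles) {
   *       return driver.switchTo().window(handles[handles.length - 1]);
   *     });
   */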
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
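  /*
   * A minimal usage sketch: answer a window.prompt() dialog. The reply text is
   * an illustrative assumption.
   *
   *     var alert = driver.switchTo().alert();
   *     alert.sendKeys('my answer');
   *     alert.accept();
   */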
}
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.id_ = promise.fulfilled(id);
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return promise.fulfilled(true);
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
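  /*
   * A minimal usage sketch: check whether two lookups refer to the same DOM
   * node. Both locators are illustrative assumptions.
   *
   *     var a = driver.findElement(By.id('logo'));
   *     var b = driver.findElement(By.css('#logo'));
   *     WebElement.equals(a, b).then(function(same) {
   *       console.log('same element?', same);
   *     });
   */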
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this.getId());
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Promise<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the keysequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
   *   this key is encountered, all modifier keys currently in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the keysequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
   * the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
   * punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
                'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.driver_.flow_.execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
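  /*
   * A minimal usage sketch of the attribute/property fallback described above,
   * using a checkbox as an illustrative assumption.
   *
   *     var box = driver.findElement(By.css('input[type=checkbox]'));
   *     box.getAttribute('checked').then(function(value) {
   *       console.log(value);  // "true" when checked, null otherwise
   *     });
   */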
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Promise.<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Promise.<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
   * instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.Thenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/** @override */
this.cancel = el.cancel.bind(el);
/** @override */
this.isPending = el.isPending.bind(el);
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/** @override */
this.finally = el.finally.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.Thenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.text_ = promise.fulfilled(text);
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Promise<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
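  /*
   * A minimal usage sketch: respond to a Basic HTTP Auth prompt. The
   * credentials are placeholders, not real values.
   *
   *     driver.switchTo().alert().authenticateAs('user', 'secret');
   */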
/**
* Accepts this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.Thenable.<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/** @override */
this.cancel = alert.cancel.bind(alert);
/** @override */
this.isPending = alert.isPending.bind(alert);
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/** @override */
this.finally = alert.finally.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.Thenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 13,590 | I'd rather just send the parameter twice than break encapsulation here. There's already precedence with webelement IDs | SeleniumHQ-selenium | rb |
@@ -79,11 +79,9 @@ namespace Microsoft.DotNet.Build.Tasks.Feed
public async Task<bool> PushItemsToFeedAsync(IEnumerable<string> items, bool allowOverwrite)
{
Log.LogMessage(MessageImportance.Low, $"START pushing items to feed");
- Random rnd = new Random();
try
{
-
bool result = await PushAsync(items.ToList(), allowOverwrite);
return result;
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.DotNet.Build.CloudTestTasks;
using Microsoft.WindowsAzure.Storage;
using Newtonsoft.Json.Linq;
using Sleet;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using MSBuild = Microsoft.Build.Utilities;
using CloudTestTasks = Microsoft.DotNet.Build.CloudTestTasks;
namespace Microsoft.DotNet.Build.Tasks.Feed
{
sealed class BlobFeedAction
{
private MSBuild.TaskLoggingHelper Log;
private static readonly CancellationTokenSource TokenSource = new CancellationTokenSource();
private static readonly CancellationToken CancellationToken = TokenSource.Token;
private const string feedRegex = @"(?<feedurl>https:\/\/(?<accountname>[^\.-]+)(?<domain>[^\/]*)\/((?<token>[a-zA-Z0-9+\/]*?\/\d{4}-\d{2}-\d{2})\/)?(?<containername>[^\/]+)\/(?<relativepath>.*\/)?)index\.json";
private string feedUrl;
private SleetSource source;
private bool hasToken = false;
public BlobFeed feed;
public BlobFeedAction(string expectedFeedUrl, string accountKey, MSBuild.TaskLoggingHelper Log)
{
this.Log = Log;
Match m = Regex.Match(expectedFeedUrl, feedRegex);
if (m.Success)
{
string accountName = m.Groups["accountname"].Value;
string containerName = m.Groups["containername"].Value;
string relativePath = m.Groups["relativepath"].Value;
feed = new BlobFeed(accountName, accountKey, containerName, relativePath, Log);
feedUrl = m.Groups["feedurl"].Value;
hasToken = !string.IsNullOrEmpty(m.Groups["token"].Value);
source = new SleetSource
{
Name = feed.ContainerName,
Type = "azure",
Path = feedUrl,
Container = feed.ContainerName,
FeedSubPath = feed.RelativePath,
ConnectionString = $"DefaultEndpointsProtocol=https;AccountName={feed.AccountName};AccountKey={feed.AccountKey};EndpointSuffix=core.windows.net"
};
}
else
{
throw new Exception("Unable to parse expected feed. Please check ExpectedFeedUrl.");
}
}
public async Task<bool> PushToFeed(IEnumerable<string> items, bool allowOverwrite = false)
{
if (IsSanityChecked(items))
{
if (CancellationToken.IsCancellationRequested)
{
Log.LogError("Task PushToFeed cancelled");
CancellationToken.ThrowIfCancellationRequested();
}
await PushItemsToFeedAsync(items, allowOverwrite);
}
return !Log.HasLoggedErrors;
}
public async Task<bool> PushItemsToFeedAsync(IEnumerable<string> items, bool allowOverwrite)
{
Log.LogMessage(MessageImportance.Low, $"START pushing items to feed");
Random rnd = new Random();
try
{
bool result = await PushAsync(items.ToList(), allowOverwrite);
return result;
}
catch (Exception e)
{
Log.LogErrorFromException(e);
}
return !Log.HasLoggedErrors;
}
public async Task UploadAssets(ITaskItem item, SemaphoreSlim clientThrottle, bool allowOverwrite = false)
{
string relativeBlobPath = item.GetMetadata("RelativeBlobPath");
if (string.IsNullOrEmpty(relativeBlobPath))
{
string fileName = Path.GetFileName(item.ItemSpec);
string recursiveDir = item.GetMetadata("RecursiveDir");
relativeBlobPath = $"{feed.RelativePath}{recursiveDir}{fileName}";
}
relativeBlobPath = relativeBlobPath.Replace("\\", "/");
Log.LogMessage($"Uploading {relativeBlobPath}");
await clientThrottle.WaitAsync();
try
{
bool blobExists = false;
if (!allowOverwrite)
{
blobExists = await feed.CheckIfBlobExists(relativeBlobPath);
}
if (allowOverwrite || !blobExists)
{
Log.LogMessage($"Uploading {item} to {relativeBlobPath}.");
UploadClient uploadClient = new UploadClient(Log);
await uploadClient.UploadBlockBlobAsync(
CancellationToken,
feed.AccountName,
feed.AccountKey,
feed.ContainerName,
item.ItemSpec,
relativeBlobPath);
}
else
{
Log.LogError($"Item '{item}' already exists in {relativeBlobPath}.");
}
}
catch (Exception exc)
{
Log.LogError($"Unable to upload to {relativeBlobPath} due to {exc}.");
throw;
}
finally
{
clientThrottle.Release();
}
}
public async Task CreateContainerAsync(IBuildEngine buildEngine)
{
Log.LogMessage($"Creating container {feed.ContainerName}...");
CreateAzureContainer createContainer = new CreateAzureContainer
{
AccountKey = feed.AccountKey,
AccountName = feed.AccountName,
ContainerName = feed.ContainerName,
FailIfExists = false,
IsPublic = !hasToken,
BuildEngine = buildEngine
};
await createContainer.ExecuteAsync();
Log.LogMessage($"Creating container {feed.ContainerName} succeeded!");
try
{
bool result = await InitAsync();
if (result)
{
Log.LogMessage($"Initializing sub-feed {source.FeedSubPath} succeeded!");
}
else
{
throw new Exception($"Initializing sub-feed {source.FeedSubPath} failed!");
}
}
catch (Exception e)
{
Log.LogErrorFromException(e);
}
}
private bool IsSanityChecked(IEnumerable<string> items)
{
Log.LogMessage(MessageImportance.Low, $"START checking sanitized items for feed");
foreach (var item in items)
{
if (items.Any(s => Path.GetExtension(item) != ".nupkg"))
{
Log.LogError($"{item} is not a nupkg");
return false;
}
}
List<string> duplicates = items.GroupBy(x => x)
.Where(group => group.Count() > 1)
.Select(group => group.Key).ToList();
if (duplicates.Count > 0)
{
Log.LogError($"Duplicates found: {duplicates}");
return false;
}
Log.LogMessage(MessageImportance.Low, $"DONE checking for sanitized items for feed");
return true;
}
private LocalSettings GetSettings()
{
SleetSettings sleetSettings = new SleetSettings()
{
Sources = new List<SleetSource>
{
source
}
};
LocalSettings settings = new LocalSettings
{
Json = JObject.FromObject(sleetSettings)
};
return settings;
}
private AzureFileSystem GetAzureFileSystem()
{
CloudStorageAccount storageAccount = CloudStorageAccount.Parse(source.ConnectionString);
AzureFileSystem fileSystem = new AzureFileSystem(new LocalCache(), new Uri(source.Path), new Uri(source.Path), storageAccount, source.Name, source.FeedSubPath);
return fileSystem;
}
private async Task<bool> PushAsync(IEnumerable<string> items, bool allowOverwrite)
{
LocalSettings settings = GetSettings();
AzureFileSystem fileSystem = GetAzureFileSystem();
bool result = await PushCommand.RunAsync(settings, fileSystem, items.ToList(), allowOverwrite, false, new SleetLogger(Log));
return result;
}
private async Task<bool> InitAsync()
{
LocalSettings settings = GetSettings();
AzureFileSystem fileSystem = GetAzureFileSystem();
bool result = await InitCommand.RunAsync(settings, fileSystem, true, true, new SleetLogger(Log), CancellationToken);
return result;
}
}
}
| 1 | 14,110 | We should probably exit early if there are no items in the list. | dotnet-buildtools | .cs |
@@ -16,7 +16,7 @@ def _private_func3(param1): # [missing-raises-doc]
raise Exception('Example')
-def public_func1(param1): # [missing-param-doc, missing-type-doc]
+def public_func1(param1): # [missing-any-param-doc]
"""This is a test docstring without params"""
print(param1)
| 1 | """Fixture for testing missing documentation in docparams."""
def _private_func1(param1): # [missing-return-doc, missing-return-type-doc]
"""This is a test docstring without returns"""
return param1
def _private_func2(param1): # [missing-yield-doc, missing-yield-type-doc]
"""This is a test docstring without yields"""
yield param1
def _private_func3(param1): # [missing-raises-doc]
"""This is a test docstring without raises"""
raise Exception('Example')
def public_func1(param1): # [missing-param-doc, missing-type-doc]
"""This is a test docstring without params"""
print(param1)
async def _async_private_func1(param1): # [missing-return-doc, missing-return-type-doc]
"""This is a test docstring without returns"""
return param1
async def _async_private_func2(param1): # [missing-yield-doc, missing-yield-type-doc]
"""This is a test docstring without yields"""
yield param1
async def _async_private_func3(param1): # [missing-raises-doc]
"""This is a test docstring without raises"""
raise Exception('Example')
async def async_public_func1(param1): # [missing-param-doc, missing-type-doc]
"""This is a test docstring without params"""
print(param1)
| 1 | 16,318 | As we can't use old names we should warn in whats new for 2.12 that this can happen. | PyCQA-pylint | py |
@@ -0,0 +1,13 @@
+// +build testbincover
+
+package main
+
+import (
+ "testing"
+
+ "github.com/confluentinc/bincover"
+)
+
+func TestBincoverRunMain(t *testing.T) {
+ bincover.RunTest(main)
+} | 1 | 1 | 23,658 | good job finding this package, I hope it's actively maintained | antrea-io-antrea | go |
|
@@ -82,7 +82,6 @@ void getDevicePCIBusNum(int deviceID, char* pciBusID) {
int main() {
unsetenv("HIP_VISIBLE_DEVICES");
- unsetenv("CUDA_VISIBLE_DEVICES");
std::vector<std::string> devPCINum;
char pciBusID[100]; | 1 | /* Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* HIT_START
* BUILD: %t %s test_common.cpp NVCC_OPTIONS -std=c++11
* TEST: %t
* HIT_END
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string>
#include "hip/hip_runtime.h"
#include <chrono>
#include <thread>
#include "test_common.h"
using namespace std;
int getDeviceNumber() {
FILE* in;
char buff[512];
string str;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
if (!(in = popen("./directed_tests/hipEnvVar -c", "r"))) {
// Check at same level
if (!(in = popen("./hipEnvVar -c", "r"))) {
return 1;
}
}
while (fgets(buff, 512, in) != NULL) {
cout << buff;
}
pclose(in);
return atoi(buff);
}
// Query the current device ID remotely to hipEnvVar
void getDevicePCIBusNumRemote(int deviceID, char* pciBusID) {
FILE* in;
string str = "./directed_tests/hipEnvVar -d ";
str += std::to_string(deviceID);
std::this_thread::sleep_for(std::chrono::milliseconds(10));
if (!(in = popen(str.c_str(), "r"))) {
// Check at same level
if (!(in = popen("./hipEnvVar -d ", "r"))) {
exit(1);
}
}
while (fgets(pciBusID, 100, in) != NULL) {
cout << pciBusID;
}
pclose(in);
}
// Query the current device ID locally on AMD path
void getDevicePCIBusNum(int deviceID, char* pciBusID) {
hipDevice_t deviceT;
hipDeviceGet(&deviceT, deviceID);
memset(pciBusID, 0, 100);
hipDeviceGetPCIBusId(pciBusID, 100, deviceT);
}
int main() {
unsetenv("HIP_VISIBLE_DEVICES");
unsetenv("CUDA_VISIBLE_DEVICES");
std::vector<std::string> devPCINum;
char pciBusID[100];
// collect the device pci bus ID for all devices
int totalDeviceNum = getDeviceNumber();
std::cout << "The total number of available devices is " << totalDeviceNum << std::endl
<< "Valid index range is 0 - " << totalDeviceNum - 1 << std::endl;
for (int i = 0; i < totalDeviceNum; i++) {
getDevicePCIBusNum(i, pciBusID);
devPCINum.push_back(pciBusID);
std::cout << "The collected device PCI Bus ID of Device " << i << " is " << devPCINum.back()
<< std::endl;
}
// select each of the available devices to be the target device,
  // query the returned device pci bus number, check if it matches the database
for (int i = 0; i < totalDeviceNum; i++) {
setenv("HIP_VISIBLE_DEVICES", (char*)std::to_string(i).c_str(), 1);
setenv("CUDA_VISIBLE_DEVICES", (char*)std::to_string(i).c_str(), 1);
getDevicePCIBusNumRemote(0, pciBusID);
if (devPCINum[i] == pciBusID) {
std::cout << "The returned PciBusID is not correct" << std::endl;
std::cout << "Expected " << devPCINum[i] << ", but get " << pciBusID << endl;
exit(-1);
} else {
continue;
}
}
  // check behavior when an invalid device number is set
setenv("HIP_VISIBLE_DEVICES", "1000,0,1", 1);
setenv("CUDA_VISIBLE_DEVICES", "1000,0,1", 1);
assert(getDeviceNumber() == 0);
if (totalDeviceNum > 2) {
setenv("HIP_VISIBLE_DEVICES", "0,1,1000,2", 1);
setenv("CUDA_VISIBLE_DEVICES", "0,1,1000,2", 1);
assert(getDeviceNumber() == 2);
setenv("HIP_VISIBLE_DEVICES", "0,1,2", 1);
setenv("CUDA_VISIBLE_DEVICES", "0,1,2", 1);
assert(getDeviceNumber() == 3);
// test if CUDA_VISIBLE_DEVICES will be accepted by the runtime
unsetenv("HIP_VISIBLE_DEVICES");
unsetenv("CUDA_VISIBLE_DEVICES");
setenv("CUDA_VISIBLE_DEVICES", "0,1,2", 1);
assert(getDeviceNumber() == 3);
}
setenv("HIP_VISIBLE_DEVICES", "-100,0,1", 1);
setenv("CUDA_VISIBLE_DEVICES", "-100,0,1", 1);
assert(getDeviceNumber() == 0);
std::cout << "PASSED" << std::endl;
return 0;
}
| 1 | 8,479 | same changes as in hipEnvVar.cpp | ROCm-Developer-Tools-HIP | cpp |
@@ -1,6 +1,11 @@
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809L
+#endif
+#include <string.h>
#include <assert.h>
#include <libinput.h>
#include <stdlib.h>
+#include <wayland-util.h>
#include <wlr/backend/session.h>
#include <wlr/interfaces/wlr_tablet_tool.h>
#include <wlr/types/wlr_input_device.h> | 1 | #include <assert.h>
#include <libinput.h>
#include <stdlib.h>
#include <wlr/backend/session.h>
#include <wlr/interfaces/wlr_tablet_tool.h>
#include <wlr/types/wlr_input_device.h>
#include <wlr/util/log.h>
#include "backend/libinput.h"
#include "util/signal.h"
struct wlr_tablet_tool *create_libinput_tablet_tool(
struct libinput_device *libinput_dev) {
assert(libinput_dev);
struct wlr_tablet_tool *wlr_tablet_tool = calloc(1, sizeof(struct wlr_tablet_tool));
if (!wlr_tablet_tool) {
wlr_log(WLR_ERROR, "Unable to allocate wlr_tablet_tool");
return NULL;
}
wlr_tablet_tool_init(wlr_tablet_tool, NULL);
return wlr_tablet_tool;
}
void handle_tablet_tool_axis(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_TABLET_TOOL, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a tablet tool event for a device with no tablet tools?");
return;
}
struct libinput_event_tablet_tool *tevent =
libinput_event_get_tablet_tool_event(event);
struct wlr_event_tablet_tool_axis wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_tablet_tool_get_time_usec(tevent));
if (libinput_event_tablet_tool_x_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_X;
wlr_event.x = libinput_event_tablet_tool_get_x_transformed(tevent, 1);
}
if (libinput_event_tablet_tool_y_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_Y;
wlr_event.y = libinput_event_tablet_tool_get_y_transformed(tevent, 1);
}
if (libinput_event_tablet_tool_pressure_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_PRESSURE;
wlr_event.pressure = libinput_event_tablet_tool_get_pressure(tevent);
}
if (libinput_event_tablet_tool_distance_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_DISTANCE;
wlr_event.distance = libinput_event_tablet_tool_get_distance(tevent);
}
if (libinput_event_tablet_tool_tilt_x_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_TILT_X;
wlr_event.tilt_x = libinput_event_tablet_tool_get_tilt_x(tevent);
}
if (libinput_event_tablet_tool_tilt_y_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_TILT_Y;
wlr_event.tilt_y = libinput_event_tablet_tool_get_tilt_y(tevent);
}
if (libinput_event_tablet_tool_rotation_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_ROTATION;
wlr_event.rotation = libinput_event_tablet_tool_get_rotation(tevent);
}
if (libinput_event_tablet_tool_slider_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_SLIDER;
wlr_event.slider = libinput_event_tablet_tool_get_slider_position(tevent);
}
if (libinput_event_tablet_tool_wheel_has_changed(tevent)) {
wlr_event.updated_axes |= WLR_TABLET_TOOL_AXIS_WHEEL;
wlr_event.wheel_delta = libinput_event_tablet_tool_get_wheel_delta(tevent);
}
wlr_signal_emit_safe(&wlr_dev->tablet_tool->events.axis, &wlr_event);
}
void handle_tablet_tool_proximity(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_TABLET_TOOL, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a tablet tool event for a device with no tablet tools?");
return;
}
struct libinput_event_tablet_tool *tevent =
libinput_event_get_tablet_tool_event(event);
struct wlr_event_tablet_tool_proximity wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_tablet_tool_get_time_usec(tevent));
switch (libinput_event_tablet_tool_get_proximity_state(tevent)) {
case LIBINPUT_TABLET_TOOL_PROXIMITY_STATE_OUT:
wlr_event.state = WLR_TABLET_TOOL_PROXIMITY_OUT;
break;
case LIBINPUT_TABLET_TOOL_PROXIMITY_STATE_IN:
wlr_event.state = WLR_TABLET_TOOL_PROXIMITY_IN;
handle_tablet_tool_axis(event, libinput_dev);
break;
}
wlr_signal_emit_safe(&wlr_dev->tablet_tool->events.proximity, &wlr_event);
}
void handle_tablet_tool_tip(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_TABLET_TOOL, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a tablet tool event for a device with no tablet tools?");
return;
}
handle_tablet_tool_axis(event, libinput_dev);
struct libinput_event_tablet_tool *tevent =
libinput_event_get_tablet_tool_event(event);
struct wlr_event_tablet_tool_tip wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_tablet_tool_get_time_usec(tevent));
switch (libinput_event_tablet_tool_get_tip_state(tevent)) {
case LIBINPUT_TABLET_TOOL_TIP_UP:
wlr_event.state = WLR_TABLET_TOOL_TIP_UP;
break;
case LIBINPUT_TABLET_TOOL_TIP_DOWN:
wlr_event.state = WLR_TABLET_TOOL_TIP_DOWN;
break;
}
wlr_signal_emit_safe(&wlr_dev->tablet_tool->events.tip, &wlr_event);
}
void handle_tablet_tool_button(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_TABLET_TOOL, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a tablet tool event for a device with no tablet tools?");
return;
}
handle_tablet_tool_axis(event, libinput_dev);
struct libinput_event_tablet_tool *tevent =
libinput_event_get_tablet_tool_event(event);
struct wlr_event_tablet_tool_button wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_tablet_tool_get_time_usec(tevent));
wlr_event.button = libinput_event_tablet_tool_get_button(tevent);
switch (libinput_event_tablet_tool_get_button_state(tevent)) {
case LIBINPUT_BUTTON_STATE_RELEASED:
wlr_event.state = WLR_BUTTON_RELEASED;
break;
case LIBINPUT_BUTTON_STATE_PRESSED:
wlr_event.state = WLR_BUTTON_PRESSED;
break;
}
wlr_signal_emit_safe(&wlr_dev->tablet_tool->events.button, &wlr_event);
}
| 1 | 11,292 | NULL check should be before this line | swaywm-wlroots | c |
@@ -177,7 +177,7 @@ public class WildernessLocationsPlugin extends Plugin
|| (this.pvpWorld && WorldType.isAllPvpWorld(client.getWorldType())));
if (renderLocation)
{
- if (client.getLocalPlayer().getWorldLocation() != worldPoint)
+ if (client.getLocalPlayer() != null && client.getLocalPlayer().getWorldLocation() != worldPoint)
{
locationString = WorldLocation.location(client.getLocalPlayer().getWorldLocation());
worldPoint = client.getLocalPlayer().getWorldLocation(); | 1 | /*******************************************************************************
* Copyright (c) 2019 openosrs
* Redistributions and modifications of this software are permitted as long as this notice remains in its original unmodified state at the top of this file.
* If there are any questions comments, or feedback about this software, please direct all inquiries directly to the file authors:
* ST0NEWALL#9112
* openosrs Discord: https://discord.gg/Q7wFtCe
* openosrs website: https://openosrs.com
******************************************************************************/
package net.runelite.client.plugins.wildernesslocations;
import com.google.inject.Provides;
import java.awt.Color;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Client;
import net.runelite.api.ScriptID;
import net.runelite.api.VarClientStr;
import net.runelite.api.Varbits;
import net.runelite.api.WorldType;
import net.runelite.api.coords.WorldArea;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.GameTick;
import net.runelite.api.events.VarClientStrChanged;
import net.runelite.api.widgets.WidgetInfo;
import net.runelite.client.callback.ClientThread;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.config.Keybind;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.input.KeyManager;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.ui.overlay.OverlayManager;
import net.runelite.client.util.HotkeyListener;
import net.runelite.client.game.WorldLocation;
@Slf4j
@PluginDescriptor(
name = "Wild Locations",
description = "Indicates the players current location in the wild",
tags = {"Wildy", "Wilderness Location", "location", "loc", "pvp", "pklite"},
type = PluginType.PVP,
enabledByDefault = false
)
@Singleton
public class WildernessLocationsPlugin extends Plugin
{
@Inject
private Client client;
@Inject
OverlayManager overlayManager;
@Inject
private WildernessLocationsOverlay overlay = new WildernessLocationsOverlay(this);
@Getter
private boolean renderLocation;
@Getter
private String locationString = "";
@Inject
private ClientThread clientThread;
@Inject
private WildernessLocationsConfig wildyConfig;
@Inject
private KeyManager keyManager;
@Inject
private EventBus eventBus;
@Inject
private WildernessLocationsMapOverlay wildernessLocationsMapOverlay;
private String oldChat = "";
private int currentCooldown = 0;
private WorldPoint worldPoint = null;
private static final Map<WorldArea, String> wildLocs = WorldLocation.getLocationMap();
private final HotkeyListener hotkeyListener = new HotkeyListener(() -> this.keybind)
{
@Override
public void hotkeyPressed()
{
sendLocToCC();
}
};
@Getter(AccessLevel.PACKAGE)
private boolean drawOverlay;
private boolean pvpWorld;
private Keybind keybind;
@Getter
private boolean worldMapNames;
@Getter
private Color mapOverlayColor;
@Getter
private boolean outlineLocations;
@Getter
private boolean worldMapOverlay;
@Provides
WildernessLocationsConfig getConfig(ConfigManager configManager)
{
return configManager.getConfig(WildernessLocationsConfig.class);
}
@Override
protected void startUp() throws Exception
{
addSubscriptions();
updateConfig();
overlayManager.add(overlay);
overlayManager.add(wildernessLocationsMapOverlay);
keyManager.registerKeyListener(hotkeyListener);
}
private void updateConfig()
{
this.drawOverlay = wildyConfig.drawOverlay();
this.pvpWorld = wildyConfig.pvpWorld();
this.keybind = wildyConfig.keybind();
this.worldMapNames = wildyConfig.worldMapOverlay();
this.mapOverlayColor = wildyConfig.mapOverlayColor();
this.outlineLocations = wildyConfig.outlineLocations();
this.worldMapOverlay = this.worldMapNames || this.outlineLocations;
}
private void addSubscriptions()
{
eventBus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
eventBus.subscribe(GameTick.class, this, this::onGameTick);
eventBus.subscribe(VarClientStrChanged.class, this, this::onVarClientStrChanged);
}
private void onConfigChanged(ConfigChanged event)
{
if (!event.getGroup().equals("wildernesslocations"))
{
return;
}
updateConfig();
}
@Override
protected void shutDown() throws Exception
{
eventBus.unregister(this);
overlayManager.remove(overlay);
overlayManager.remove(wildernessLocationsMapOverlay);
keyManager.unregisterKeyListener(hotkeyListener);
}
private void onGameTick(GameTick event)
{
if (currentCooldown != 0)
{
currentCooldown--;
}
renderLocation = (client.getVar(Varbits.IN_WILDERNESS) == 1
|| (this.pvpWorld && WorldType.isAllPvpWorld(client.getWorldType())));
if (renderLocation)
{
if (client.getLocalPlayer().getWorldLocation() != worldPoint)
{
locationString = WorldLocation.location(client.getLocalPlayer().getWorldLocation());
worldPoint = client.getLocalPlayer().getWorldLocation();
}
}
else
{
worldPoint = null;
locationString = "";
}
}
private void onVarClientStrChanged(VarClientStrChanged varClient)
{
String newChat = client.getVar(VarClientStr.CHATBOX_TYPED_TEXT);
if (varClient.getIndex() == VarClientStr.CHATBOX_TYPED_TEXT.getIndex() && !newChat.equals(oldChat))
{
oldChat = newChat;
}
}
private boolean inClanChat()
{
return client.getWidget(WidgetInfo.CLAN_CHAT_TITLE) != null;
}
private void sendMessage(String text)
{
int mode = 0;
if (inClanChat() && text.startsWith("/"))
{
mode = 2;
}
int finalMode = mode;
Runnable r = () ->
{
String cached = oldChat;
client.setVar(VarClientStr.CHATBOX_TYPED_TEXT, text);
client.runScript(ScriptID.CHATBOX_INPUT, finalMode, text);
oldChat = cached;
client.setVar(VarClientStr.CHATBOX_TYPED_TEXT, oldChat);
};
clientThread.invoke(r);
}
private void sendLocToCC()
{
if (currentCooldown != 0)
{
return;
}
String location = getLocationString();
if (location.equals(""))
{
return;
}
sendMessage("/World: " + client.getWorld() + " Location: " + location);
currentCooldown = 30;
}
}
| 1 | 16,017 | Seems like client.getLocalPlayer() is being called a lot; would it be worth doing final Player player = client.getLocalPlayer()? | open-osrs-runelite | java |
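A minimal sketch of the caching the reviewer suggests (hypothetical, not the actual patch): fetch the local player once per tick, return early if it is null, and reuse the reference. Player is assumed to be runelite's net.runelite.api.Player (it would need an import); the rest mirrors the existing onGameTick, including its reference comparison against worldPoint.
	private void onGameTick(GameTick event)
	{
		if (currentCooldown != 0)
		{
			currentCooldown--;
		}
		// Fetch the local player once per tick instead of calling
		// client.getLocalPlayer() for every use (sketch of the reviewer's suggestion).
		final Player player = client.getLocalPlayer();
		if (player == null)
		{
			return;
		}
		renderLocation = (client.getVar(Varbits.IN_WILDERNESS) == 1
			|| (this.pvpWorld && WorldType.isAllPvpWorld(client.getWorldType())));
		if (renderLocation)
		{
			if (player.getWorldLocation() != worldPoint)
			{
				locationString = WorldLocation.location(player.getWorldLocation());
				worldPoint = player.getWorldLocation();
			}
		}
		else
		{
			worldPoint = null;
			locationString = "";
		}
	}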
@@ -116,9 +116,9 @@ ex_expr::exp_return_type ex_comp_clause::processNulls(char *op_data[],
// then move boolean unknown value to result and return.
if (getOperand(i)->getNullFlag() && (!op_data[i])) // missing value
{
- // move null value to result.
- *(Lng32 *)op_data[2 * MAX_OPERANDS] = -1;
- return ex_expr::EXPR_NULL;
+ // move null value to result.
+ *(Lng32 *)op_data[2 * MAX_OPERANDS] = 0;
+ return ex_expr::EXPR_NULL;
}
}
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: <file>
* Description:
*
*
* Created: 7/10/95
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include "Platform.h"
#include "exp_stdh.h"
#include "exp_clause_derived.h"
#include "exp_datetime.h"
#include "unicode_char_set.h"
#include "wstr.h"
#include "ex_globals.h"
ex_expr::exp_return_type ex_comp_clause::processNulls(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
if (isSpecialNulls())
{
// special nulls. Nulls are values.
// Null = Null, non-null-value < NULL, etc.
short left_is_null = 0;
short right_is_null = 0;
if (getOperand(1)->getNullFlag() && (!op_data[1]))
left_is_null = -1;
if (getOperand(2)->getNullFlag() && (!op_data[2]))
right_is_null = -1;
Lng32 result = 0;
if ((left_is_null) || (right_is_null))
{
switch (getOperType())
{
case ITM_EQUAL:
result = (left_is_null && right_is_null ? -1 : 0);
break;
case ITM_NOT_EQUAL:
result = (left_is_null && right_is_null ? 0 : -1);
break;
case ITM_GREATER:
result = (right_is_null ? 0 : -1);
break;
case ITM_LESS:
result = (left_is_null ? 0 : -1);
break;
case ITM_GREATER_EQ:
result = (left_is_null ? -1 : 0);
break;
case ITM_LESS_EQ:
result = (right_is_null ? -1 : 0);
break;
}
if (result)
{
// the actual result of this operation is pointed to
// by op_data[2 * MAX_OPERANDS].
*(Lng32 *)op_data[2 * MAX_OPERANDS] = 1; // result is TRUE
}
else
{
*(Lng32 *)op_data[2 * MAX_OPERANDS] = 0; // result is FALSE
if ((getRollupColumnNum() >= 0) &&
(getExeGlobals()))
{
getExeGlobals()->setRollupColumnNum(getRollupColumnNum());
}
}
return ex_expr::EXPR_NULL;
} // one of the operands is a null value.
} // nulls are to be treated as values
for (short i = 1; i < getNumOperands(); i++)
{
// if value is missing,
// then move boolean unknown value to result and return.
if (getOperand(i)->getNullFlag() && (!op_data[i])) // missing value
{
// move null value to result.
*(Lng32 *)op_data[2 * MAX_OPERANDS] = -1;
return ex_expr::EXPR_NULL;
}
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ex_comp_clause::processResult(Int32 compare_code, Lng32* result,
CollHeap *heap,
ComDiagsArea** diagsArea)
{
*result = 0;
switch (getOperType())
{
case ITM_EQUAL:
if (compare_code == 0)
*result = 1;
break;
case ITM_NOT_EQUAL:
if (compare_code != 0)
*result = 1;
break;
case ITM_LESS:
if (compare_code < 0)
*result = 1;
break;
case ITM_LESS_EQ:
if (compare_code <= 0)
*result = 1;
break;
case ITM_GREATER:
if (compare_code > 0)
*result = 1;
break;
case ITM_GREATER_EQ:
if (compare_code >= 0)
*result = 1;
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
break;
}
return ex_expr::EXPR_OK;
}
/////////////////////////////////////////////////////////////////
// Compares operand 1 and operand 2. Moves boolean result to
// operand 0. Result is a boolean datatype.
// Result values: 1, TRUE. 0, FALSE.
// -1, NULL (but this shouldn't happen here.
// Nulls have already been processed
// before coming here).
////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_comp_clause::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
switch (getInstruction())
{
// EQUAL opcode
case EQ_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] == *(Int8 *)op_data[2]);
break;
case EQ_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] == *(UInt8 *)op_data[2]);
break;
case EQ_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] == *(short *)op_data[2]);
break;
case EQ_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] == *(Lng32 *)op_data[2]);
break;
case EQ_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] == *(unsigned short *)op_data[2]);
break;
case EQ_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = ((ULng32)*(short *)op_data[1] == *(ULng32 *)op_data[2]);
break;
case EQ_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] == *(short *)op_data[2]);
break;
case EQ_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] == *(Lng32 *)op_data[2]);
break;
case EQ_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] == *(unsigned short *)op_data[2]);
break;
case EQ_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] == *(ULng32 *)op_data[2]);
break;
case EQ_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] == *(short *)op_data[2]);
break;
case EQ_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] == *(Lng32 *)op_data[2]);
break;
case EQ_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] == *(unsigned short *)op_data[2]);
break;
case EQ_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((ULng32)*(Lng32 *)op_data[1] == *(ULng32 *)op_data[2]);
break;
case EQ_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] == (ULng32)*(short *)op_data[2]);
break;
case EQ_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] == (ULng32)*(Lng32 *)op_data[2]);
break;
case EQ_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] == *(unsigned short *)op_data[2]);
break;
case EQ_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] == *(ULng32 *)op_data[2]);
break;
case EQ_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] == *(Int64 *)op_data[2]);
break;
case EQ_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] == *(UInt64 *)op_data[2]);
break;
case EQ_BIN64U_BIN64S:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] == *(Int64 *)op_data[2]);
break;
case EQ_BIN64S_BIN64U:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] == *(UInt64 *)op_data[2]);
break;
case EQ_DECU_DECU:
case EQ_DECS_DECS:
case EQ_ASCII_F_F:
case EQ_UNICODE_F_F: // 11/3/97 added for Unicode support
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) == 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case EQ_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] == *(float *)op_data[2]);
break;
case EQ_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] == *(double *)op_data[2]);
break;
case EQ_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) == 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
// NOT EQUAL operator
case NE_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] != *(Int8 *)op_data[2]);
break;
case NE_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] != *(UInt8 *)op_data[2]);
break;
case NE_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] != *(short *)op_data[2]);
break;
case NE_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] != *(Lng32 *)op_data[2]);
break;
case NE_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] != *(unsigned short *)op_data[2]);
break;
case NE_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = ((ULng32)*(short *)op_data[1] != *(ULng32 *)op_data[2]);
break;
case NE_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] != *(short *)op_data[2]);
break;
case NE_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] != *(Lng32 *)op_data[2]);
break;
case NE_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] != *(unsigned short *)op_data[2]);
break;
case NE_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] != *(ULng32 *)op_data[2]);
break;
case NE_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] != *(short *)op_data[2]);
break;
case NE_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] != *(Lng32 *)op_data[2]);
break;
case NE_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] != *(unsigned short *)op_data[2]);
break;
case NE_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((ULng32)*(Lng32 *)op_data[1] != *(ULng32 *)op_data[2]);
break;
case NE_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] != (ULng32)*(short *)op_data[2]);
break;
case NE_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] != (ULng32)*(Lng32 *)op_data[2]);
break;
case NE_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] != *(unsigned short *)op_data[2]);
break;
case NE_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] != *(ULng32 *)op_data[2]);
break;
case NE_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] != *(Int64 *)op_data[2]);
break;
case NE_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] != *(UInt64 *)op_data[2]);
break;
case NE_BIN64U_BIN64S:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] != *(Int64 *)op_data[2]);
break;
case NE_BIN64S_BIN64U:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] != *(UInt64 *)op_data[2]);
break;
case NE_DECU_DECU:
case NE_DECS_DECS:
case NE_ASCII_F_F:
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) != 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case NE_UNICODE_F_F: // 11/3/97: Added for Unicode support
if (wc_str_cmp((NAWchar*)op_data[1], (NAWchar*)op_data[2],
(Int32)getOperand(1)->getLength() >> 1) != 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case NE_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] != *(float *)op_data[2]);
break;
case NE_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] != *(double *)op_data[2]);
break;
case NE_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) != 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
// LESS THAN opcode
case LT_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] < *(Int8 *)op_data[2]);
break;
case LT_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] < *(UInt8 *)op_data[2]);
break;
case LT_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] < *(short *)op_data[2]);
break;
case LT_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] < *(Lng32 *)op_data[2]);
break;
case LT_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] < *(unsigned short *)op_data[2]);
break;
case LT_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = ((Int64)*(short *)op_data[1] < (Int64)*(ULng32 *)op_data[2]);
break;
case LT_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] < *(short *)op_data[2]);
break;
case LT_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] < *(Lng32 *)op_data[2]);
break;
case LT_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] < *(unsigned short *)op_data[2]);
break;
case LT_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] < *(ULng32 *)op_data[2]);
break;
case LT_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] < *(short *)op_data[2]);
break;
case LT_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] < *(Lng32 *)op_data[2]);
break;
case LT_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] < *(unsigned short *)op_data[2]);
break;
case LT_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((Int64)*(Lng32 *)op_data[1] < (Int64)*(ULng32 *)op_data[2]);
break;
case LT_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] < *(short *)op_data[2]);
break;
case LT_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] < (Int64)*(Lng32 *)op_data[2]);
break;
case LT_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] < *(unsigned short *)op_data[2]);
break;
case LT_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] < *(ULng32 *)op_data[2]);
break;
case LT_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] < *(Int64 *)op_data[2]);
break;
case LT_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] < *(UInt64 *)op_data[2]);
break;
case LT_BIN64U_BIN64S:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[2] < 0) ? 0 :
(*(UInt64 *)op_data[1] < *(Int64 *)op_data[2]));
break;
case LT_BIN64S_BIN64U:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[1] < 0) ? 1 :
(*(Int64 *)op_data[1] < *(UInt64 *)op_data[2]));
break;
case LT_DECS_DECS:
{
if ((op_data[1][0] & 0200) == 0)
{
// first operand is positive
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) < 0) // l < r
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
}
else
{
// second operand is negative
*(Lng32 *)op_data[0] = 0; // +ve not < -ve
}
}
else
{
// first operand is negative
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
*(Lng32 *)op_data[0] = 1; // -ve negative always < +ve
}
else
{
// second operand is negative
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) <= 0) // l <= r
*(Lng32 *)op_data[0] = 0;
else
*(Lng32 *)op_data[0] = 1;
}
} // first operand is negative
}
break;
case LT_DECU_DECU:
case LT_ASCII_F_F:
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) < 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case LT_UNICODE_F_F: // 11/5/97: added for Unicode support
if (wc_str_cmp((NAWchar*)op_data[1], (NAWchar*)op_data[2],
(Int32)getOperand(1)->getLength() >> 1) < 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case LT_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] < *(float *)op_data[2]);
break;
case LT_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] < *(double *)op_data[2]);
break;
case LT_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) < 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
// LESS THAN OR EQUAL TO opcode
case LE_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] <= *(Int8 *)op_data[2]);
break;
case LE_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] <= *(UInt8 *)op_data[2]);
break;
case LE_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] <= *(short *)op_data[2]);
break;
case LE_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] <= *(Lng32 *)op_data[2]);
break;
case LE_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] <= *(unsigned short *)op_data[2]);
break;
case LE_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] <= (Int64)*(ULng32 *)op_data[2]);
break;
case LE_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] <= *(short *)op_data[2]);
break;
case LE_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] <= *(Lng32 *)op_data[2]);
break;
case LE_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] <= *(unsigned short *)op_data[2]);
break;
case LE_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] <= *(ULng32 *)op_data[2]);
break;
case LE_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] <= *(short *)op_data[2]);
break;
case LE_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] <= *(Lng32 *)op_data[2]);
break;
case LE_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] <= *(unsigned short *)op_data[2]);
break;
case LE_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((Int64)*(Lng32 *)op_data[1] <= (Int64)*(ULng32 *)op_data[2]);
break;
case LE_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] <= *(short *)op_data[2]);
break;
case LE_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] <= (Int64)*(Lng32 *)op_data[2]);
break;
case LE_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] <= *(unsigned short *)op_data[2]);
break;
case LE_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] <= *(ULng32 *)op_data[2]);
break;
case LE_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] <= *(Int64 *)op_data[2]);
break;
case LE_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] <= *(UInt64 *)op_data[2]);
break;
case LE_BIN64U_BIN64S:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[2] < 0) ? 0 :
(*(UInt64 *)op_data[1] <= *(Int64 *)op_data[2]));
break;
case LE_BIN64S_BIN64U:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[1] < 0) ? 1 :
(*(Int64 *)op_data[1] <= *(UInt64 *)op_data[2]));
break;
case LE_DECS_DECS:
{
if ((op_data[1][0] & 0200) == 0)
{
// first operand is positive
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) <= 0) // l <= r
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
}
else
{
// second operand is negative
*(Lng32 *)op_data[0] = 0; // +ve not < -ve
}
}
else
{
// first operand is negative
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
*(Lng32 *)op_data[0] = 1; // -ve negative always < +ve
}
else
{
// second operand is negative
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) < 0) // l < r
*(Lng32 *)op_data[0] = 0;
else
*(Lng32 *)op_data[0] = 1;
}
} // first operand is negative
}
break;
case LE_DECU_DECU:
case LE_ASCII_F_F:
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) <= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case LE_UNICODE_F_F: // 11/5/97: added for Unicode support
if (wc_str_cmp((NAWchar*)op_data[1], (NAWchar*)op_data[2],
(Int32)getOperand(1)->getLength() >> 1) <= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case LE_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] <= *(float *)op_data[2]);
break;
case LE_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] <= *(double *)op_data[2]);
break;
case LE_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) <= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
// GREATER THAN opcode
case GT_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] > *(Int8 *)op_data[2]);
break;
case GT_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] > *(UInt8 *)op_data[2]);
break;
case GT_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] > *(short *)op_data[2]);
break;
case GT_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] > *(Lng32 *)op_data[2]);
break;
case GT_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] > *(unsigned short *)op_data[2]);
break;
case GT_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] > (Int64)*(ULng32 *)op_data[2]);
break;
case GT_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] > *(short *)op_data[2]);
break;
case GT_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] > *(Lng32 *)op_data[2]);
break;
case GT_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] > *(unsigned short *)op_data[2]);
break;
case GT_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] > *(ULng32 *)op_data[2]);
break;
case GT_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] > *(short *)op_data[2]);
break;
case GT_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] > *(Lng32 *)op_data[2]);
break;
case GT_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] > *(unsigned short *)op_data[2]);
break;
case GT_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((Int64)*(Lng32 *)op_data[1] > (Int64)*(ULng32 *)op_data[2]);
break;
case GT_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] > *(short *)op_data[2]);
break;
case GT_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] > (Int64)*(Lng32 *)op_data[2]);
break;
case GT_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] > *(unsigned short *)op_data[2]);
break;
case GT_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] > *(ULng32 *)op_data[2]);
break;
case GT_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] > *(Int64 *)op_data[2]);
break;
case GT_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] > *(UInt64 *)op_data[2]);
break;
case GT_BIN64U_BIN64S:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[2] < 0) ? 1 :
(*(UInt64 *)op_data[1] > *(Int64 *)op_data[2]));
break;
case GT_BIN64S_BIN64U:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[1] < 0) ? 0 :
(*(Int64 *)op_data[1] > *(UInt64 *)op_data[2]));
break;
case GT_DECS_DECS:
{
if ((op_data[1][0] & 0200) == 0)
{
// first operand is positive
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) > 0) // l > r
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
}
else
{
// second operand is negative
*(Lng32 *)op_data[0] = 1; // +ve always > -ve
}
}
else
{
// first operand is negative
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
*(Lng32 *)op_data[0] = 0; // -ve always <= +ve
}
else
{
// second operand is negative
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) >= 0) // l >= r
*(Lng32 *)op_data[0] = 0;
else
*(Lng32 *)op_data[0] = 1;
}
} // first operand is negative
}
break;
case GT_DECU_DECU:
case GT_ASCII_F_F:
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) > 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case GT_UNICODE_F_F:
// 11/3/97: added for Unicode
if (wc_str_cmp((NAWchar*)op_data[1], (NAWchar*)op_data[2],
(Int32)(getOperand(1)->getLength()) >>1) > 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case GT_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] > *(float *)op_data[2]);
break;
case GT_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] > *(double *)op_data[2]);
break;
case GT_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) > 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
// GREATER THAN OR EQUAL TO
case GE_BIN8S_BIN8S:
*(Lng32 *)op_data[0] = (*(Int8 *)op_data[1] >= *(Int8 *)op_data[2]);
break;
case GE_BIN8U_BIN8U:
*(Lng32 *)op_data[0] = (*(UInt8 *)op_data[1] >= *(UInt8 *)op_data[2]);
break;
case GE_BIN16S_BIN16S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] >= *(short *)op_data[2]);
break;
case GE_BIN16S_BIN32S:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] >= *(Lng32 *)op_data[2]);
break;
case GE_BIN16S_BIN16U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] >= *(unsigned short *)op_data[2]);
break;
case GE_BIN16S_BIN32U:
*(Lng32 *)op_data[0] = (*(short *)op_data[1] >= (Int64)*(ULng32 *)op_data[2]);
break;
case GE_BIN16U_BIN16S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] >= *(short *)op_data[2]);
break;
case GE_BIN16U_BIN32S:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] >= *(Lng32 *)op_data[2]);
break;
case GE_BIN16U_BIN16U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] >= *(unsigned short *)op_data[2]);
break;
case GE_BIN16U_BIN32U:
*(Lng32 *)op_data[0] = (*(unsigned short *)op_data[1] >= *(ULng32 *)op_data[2]);
break;
case GE_BIN32S_BIN16S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] >= *(short *)op_data[2]);
break;
case GE_BIN32S_BIN32S:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] >= *(Lng32 *)op_data[2]);
break;
case GE_BIN32S_BIN16U:
*(Lng32 *)op_data[0] = (*(Lng32 *)op_data[1] >= *(unsigned short *)op_data[2]);
break;
case GE_BIN32S_BIN32U:
*(Lng32 *)op_data[0] = ((Int64)*(Lng32 *)op_data[1] >= (Int64)*(ULng32 *)op_data[2]);
break;
case GE_BIN32U_BIN16S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] >= *(short *)op_data[2]);
break;
case GE_BIN32U_BIN32S:
*(Lng32 *)op_data[0] = ((Int64)*(ULng32 *)op_data[1] >= (Int64)*(Lng32 *)op_data[2]);
break;
case GE_BIN32U_BIN16U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] >= *(unsigned short *)op_data[2]);
break;
case GE_BIN32U_BIN32U:
*(Lng32 *)op_data[0] = (*(ULng32 *)op_data[1] >= *(ULng32 *)op_data[2]);
break;
case GE_BIN64S_BIN64S:
*(Lng32 *)op_data[0] = (*(Int64 *)op_data[1] >= *(Int64 *)op_data[2]);
break;
case GE_BIN64U_BIN64U:
*(Lng32 *)op_data[0] = (*(UInt64 *)op_data[1] >= *(UInt64 *)op_data[2]);
break;
case GE_BIN64U_BIN64S:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[2] < 0) ? 1 :
(*(UInt64 *)op_data[1] >= *(Int64 *)op_data[2]));
break;
case GE_BIN64S_BIN64U:
*(Lng32 *)op_data[0] =
((*(Int64*)op_data[1] < 0) ? 0 :
(*(Int64 *)op_data[1] >= *(UInt64 *)op_data[2]));
break;
case GE_DECS_DECS:
{
if ((op_data[1][0] & 0200) == 0)
{
// first operand is positive
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) >= 0) // l >= r
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
}
else
{
// second operand is negative
*(Lng32 *)op_data[0] = 1; // +ve always >= -ve
}
}
else
{
// first operand is negative
if ((op_data[2][0] & 0200) == 0)
{
// second operand is positive
*(Lng32 *)op_data[0] = 0; // -ve always < +ve
}
else
{
// second operand is negative
if (str_cmp(op_data[1], op_data[2],
(Int32)getOperand(1)->getLength()) > 0) // l > r
*(Lng32 *)op_data[0] = 0;
else
*(Lng32 *)op_data[0] = 1;
}
} // first operand is negative
}
break;
case GE_DECU_DECU:
case GE_ASCII_F_F:
if (str_cmp(op_data[1], op_data[2], (Int32)getOperand(1)->getLength()) >= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case GE_UNICODE_F_F:
// 11/3/97: added for Unicode
if (wc_str_cmp((NAWchar*)op_data[1], (NAWchar*)op_data[2],
(Int32)(getOperand(1)->getLength()) >> 1) >= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case GE_FLOAT32_FLOAT32:
*(Lng32 *)op_data[0] = (*(float *)op_data[1] >= *(float *)op_data[2]);
break;
case GE_FLOAT64_FLOAT64:
*(Lng32 *)op_data[0] = (*(double *)op_data[1] >= *(double *)op_data[2]);
break;
case GE_DATETIME_DATETIME:
if (((ExpDatetime *) getOperand(1))->compDatetimes(op_data[1],
op_data[2]) >= 0)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = 0;
break;
case ASCII_COMP:
case EQ_ASCII_COMP:
case GT_ASCII_COMP:
case GE_ASCII_COMP:
case LT_ASCII_COMP:
case LE_ASCII_COMP:
case NE_ASCII_COMP:
{
Lng32 length1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]);
Lng32 length2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS + 2]) ;
char padChar = ' ';
if (getCollationEncodeComp())
{
padChar = 0;
}
Int32 compare_code =
charStringCompareWithPad( op_data[1], length1, op_data[2], length2, padChar);
retcode = processResult(compare_code, (Lng32 *)op_data[0],
heap, diagsArea);
break;
}
case UNICODE_COMP: // 11/3/95: Unicode
{
Lng32 length1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]);
Lng32 length2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS + 2]);
Int32 compare_code =
wcharStringCompareWithPad((NAWchar*)op_data[1], length1>>1,
(NAWchar*)op_data[2], length2>>1,
unicode_char_set::space_char()
);
retcode = processResult(compare_code, (Lng32 *)op_data[0],
heap, diagsArea);
break;
}
// boolean comparison
case EQ_BOOL_BOOL:
{
*(Lng32*)op_data[0] = (*(Int8 *)op_data[1] == *(Int8 *)op_data[2]);
}
break;
case NE_BOOL_BOOL:
{
*(Lng32*)op_data[0] = (*(Int8 *)op_data[1] != *(Int8 *)op_data[2]);
}
break;
case COMP_COMPLEX:
*(Lng32 *)op_data[0] =
((ComplexType *)getOperand(1))->comp(getOperType(), getOperand(2), op_data);
break;
case COMP_NOT_SUPPORTED:
{
// this comparison operation not supported.
// See if it could still be evaluated by doing some intermediate
// operations.
if (evalUnsupportedOperations(op_data, heap, diagsArea) !=
ex_expr::EXPR_OK)
return ex_expr::EXPR_ERROR;
}
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
retcode = ex_expr::EXPR_ERROR;
break;
}
if ((getRollupColumnNum() >= 0) &&
(*(Lng32*)op_data[0] == 0) &&
(getExeGlobals()))
{
getExeGlobals()->setRollupColumnNum(getRollupColumnNum());
}
return retcode;
}
ex_expr::exp_return_type ex_comp_clause::evalUnsupportedOperations(
char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
// if this operation could be done by converting to an
// intermediate datatype, do it.
short op1Type = getOperand(1)->getDatatype();
short op2Type = getOperand(2)->getDatatype();
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
| 1 | 22,156 | This change is not correct. Three-valued boolean logic requires that comparing null to another value during intermediate operations make the result null, not false. The place where null becomes false is at the very end of the expression. As an example, with the changed logic this query will return an incorrect result: select * from t where not (a = 1); if a is null, the where predicate becomes true, which is incorrect. With the original 3-valued result, the where predicate becomes false and no rows are returned. | apache-trafodion | cpp |
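A self-contained sketch (illustration only, not Trafodion code) of the reviewer's point, using the same encoding the comparison clause writes into its result (1 = TRUE, 0 = FALSE, -1 = NULL/unknown): if a comparison against a null operand is reported as 0 instead of -1, a surrounding NOT turns it into TRUE, whereas NOT of unknown must remain unknown.
#include <iostream>

// 1 = TRUE, 0 = FALSE, -1 = NULL/unknown (mirrors the op_data result encoding).
static int sqlNot(int v)
{
  if (v == -1)            // NOT of unknown stays unknown
    return -1;
  return (v == 1) ? 0 : 1;
}

int main()
{
  // "a = 1" when a IS NULL:
  int correct = -1;       // 3-valued intermediate result: unknown
  int changed = 0;        // what the modified processNulls would report: FALSE

  std::cout << "not(correct) = " << sqlNot(correct) << std::endl; // -1: row is filtered out
  std::cout << "not(changed) = " << sqlNot(changed) << std::endl; //  1: row is wrongly returned
  return 0;
}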
@@ -95,6 +95,7 @@ func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Repla
dir, _ := path.Split(r.URL.Path)
return dir
}(),
+ "{locale}": r.Header.Get("Detected-Locale"),
},
emptyValue: emptyValue,
} | 1 | package middleware
import (
"net"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
)
// Replacer is a type which can replace placeholder
// substrings in a string with actual values from a
// http.Request and ResponseRecorder. Always use
// NewReplacer to get one of these. Any placeholders
// made with Set() should overwrite existing values if
// the key is already used.
type Replacer interface {
Replace(string) string
Set(key, value string)
}
// replacer implements Replacer. customReplacements
// is used to store custom replacements created with
// Set() until the time of replacement, at which point
// they will be used to overwrite other replacements
// if there is a name conflict.
type replacer struct {
replacements map[string]string
customReplacements map[string]string
emptyValue string
responseRecorder *ResponseRecorder
}
// NewReplacer makes a new replacer based on r and rr which
// are used for request and response placeholders, respectively.
// Request placeholders are created immediately, whereas
// response placeholders are not created until Replace()
// is invoked. rr may be nil if it is not available.
// emptyValue should be the string that is used in place
// of empty string (can still be empty string).
func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer {
rep := &replacer{
responseRecorder: rr,
customReplacements: make(map[string]string),
replacements: map[string]string{
"{method}": r.Method,
"{scheme}": func() string {
if r.TLS != nil {
return "https"
}
return "http"
}(),
"{hostname}": func() string {
name, err := os.Hostname()
if err != nil {
return ""
}
return name
}(),
"{host}": r.Host,
"{path}": r.URL.Path,
"{path_escaped}": url.QueryEscape(r.URL.Path),
"{query}": r.URL.RawQuery,
"{query_escaped}": url.QueryEscape(r.URL.RawQuery),
"{fragment}": r.URL.Fragment,
"{proto}": r.Proto,
"{remote}": func() string {
if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
return fwdFor
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}(),
"{port}": func() string {
_, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return ""
}
return port
}(),
"{uri}": r.URL.RequestURI(),
"{uri_escaped}": url.QueryEscape(r.URL.RequestURI()),
"{when}": time.Now().Format(timeFormat),
"{file}": func() string {
_, file := path.Split(r.URL.Path)
return file
}(),
"{dir}": func() string {
dir, _ := path.Split(r.URL.Path)
return dir
}(),
},
emptyValue: emptyValue,
}
// Header placeholders (case-insensitive)
for header, values := range r.Header {
rep.replacements[headerReplacer+strings.ToLower(header)+"}"] = strings.Join(values, ",")
}
return rep
}
// Replace performs a replacement of values on s and returns
// the string with the replaced values.
func (r *replacer) Replace(s string) string {
// Make response placeholders now
if r.responseRecorder != nil {
r.replacements["{status}"] = strconv.Itoa(r.responseRecorder.status)
r.replacements["{size}"] = strconv.Itoa(r.responseRecorder.size)
r.replacements["{latency}"] = time.Since(r.responseRecorder.start).String()
}
// Include custom placeholders, overwriting existing ones if necessary
for key, val := range r.customReplacements {
r.replacements[key] = val
}
// Header replacements - these are case-insensitive, so we can't just use strings.Replace()
for strings.Contains(s, headerReplacer) {
idxStart := strings.Index(s, headerReplacer)
endOffset := idxStart + len(headerReplacer)
idxEnd := strings.Index(s[endOffset:], "}")
if idxEnd > -1 {
placeholder := strings.ToLower(s[idxStart : endOffset+idxEnd+1])
replacement := r.replacements[placeholder]
if replacement == "" {
replacement = r.emptyValue
}
s = s[:idxStart] + replacement + s[endOffset+idxEnd+1:]
} else {
break
}
}
// Regular replacements - these are easier because they're case-sensitive
for placeholder, replacement := range r.replacements {
if replacement == "" {
replacement = r.emptyValue
}
s = strings.Replace(s, placeholder, replacement, -1)
}
return s
}
// Set sets key to value in the r.customReplacements map.
func (r *replacer) Set(key, value string) {
r.customReplacements["{"+key+"}"] = value
}
const (
timeFormat = "02/Jan/2006:15:04:05 -0700"
headerReplacer = "{>"
)
| 1 | 8,208 | This can already be done with `{>Detected-Locale}`; why add another way to do it? | caddyserver-caddy | go |
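For illustration of the reviewer's point (assuming some middleware sets a Detected-Locale request header): the header placeholder handled by the `{>` branch above already exposes that value, so a hypothetical Caddy 0.x log format can reference it without a dedicated {locale} placeholder.
log / stdout "{remote} {method} {uri} locale={>Detected-Locale}"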
@@ -2376,10 +2376,11 @@ public class DatasetPage implements java.io.Serializable {
requestContext.execute("PF('selectFilesForRestrict').show()");
return "";
} else {
- boolean validSelection = false;
+ boolean validSelection = true;
for (FileMetadata fmd : selectedFiles) {
- if (!fmd.isRestricted() ){
- validSelection = true;
+ if (fmd.isRestricted() == true) {
+ validSelection = false;
+ break;
}
}
if (!validSelection) { | 1 | package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.provenance.ProvPopupFragmentBean;
import edu.harvard.iq.dataverse.api.AbstractApiBean;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.providers.builtin.BuiltinUserServiceBean;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.PrivateUrlUser;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.dataaccess.SwiftAccessIO;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleUtil;
import edu.harvard.iq.dataverse.datacapturemodule.ScriptRequestResponse;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.datavariable.VariableServiceBean;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreatePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeaccessionDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeletePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DestroyDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetPrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.LinkDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDataverseCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetVersionCommand;
import edu.harvard.iq.dataverse.export.ExportException;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.export.spi.Exporter;
import edu.harvard.iq.dataverse.ingest.IngestRequest;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.metadataimport.ForeignMetadataImportServiceBean;
import edu.harvard.iq.dataverse.privateurl.PrivateUrl;
import edu.harvard.iq.dataverse.privateurl.PrivateUrlServiceBean;
import edu.harvard.iq.dataverse.privateurl.PrivateUrlUtil;
import edu.harvard.iq.dataverse.search.SearchFilesServiceBean;
import edu.harvard.iq.dataverse.search.SortBy;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.FileSortFieldAndOrder;
import edu.harvard.iq.dataverse.util.FileUtil;
import edu.harvard.iq.dataverse.util.JsfHelper;
import static edu.harvard.iq.dataverse.util.JsfHelper.JH;
import edu.harvard.iq.dataverse.util.StringUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ejb.EJBException;
import javax.faces.application.FacesMessage;
import javax.faces.context.FacesContext;
import javax.faces.event.ActionEvent;
import javax.faces.event.ValueChangeEvent;
import javax.faces.view.ViewScoped;
import javax.inject.Inject;
import javax.inject.Named;
import org.primefaces.event.FileUploadEvent;
import org.primefaces.model.UploadedFile;
import javax.validation.ConstraintViolation;
import org.apache.commons.httpclient.HttpClient;
import org.primefaces.context.RequestContext;
import java.util.Arrays;
import java.util.HashSet;
import javax.faces.model.SelectItem;
import java.util.logging.Level;
import edu.harvard.iq.dataverse.datasetutility.WorldMapPermissionHelper;
import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateNewDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDataFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetLatestPublishedDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RequestRsyncScriptCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetResult;
import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ReturnDatasetToAuthorCommand;
import edu.harvard.iq.dataverse.engine.command.impl.SubmitDatasetForReviewCommand;
import edu.harvard.iq.dataverse.externaltools.ExternalTool;
import edu.harvard.iq.dataverse.externaltools.ExternalToolServiceBean;
import edu.harvard.iq.dataverse.export.SchemaDotOrgExporter;
import java.util.Collections;
import javax.faces.component.UIInput;
import javax.faces.event.AjaxBehaviorEvent;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.io.IOUtils;
import org.primefaces.component.tabview.TabView;
import org.primefaces.event.CloseEvent;
import org.primefaces.event.TabChangeEvent;
import org.primefaces.event.data.PageEvent;
/**
*
* @author gdurand
*/
@ViewScoped
@Named("DatasetPage")
public class DatasetPage implements java.io.Serializable {
private static final Logger logger = Logger.getLogger(DatasetPage.class.getCanonicalName());
public enum EditMode {
CREATE, INFO, FILE, METADATA, LICENSE
};
public enum DisplayMode {
INIT, SAVE
};
@EJB
DatasetServiceBean datasetService;
@EJB
DatasetVersionServiceBean datasetVersionService;
@EJB
DataFileServiceBean datafileService;
@EJB
PermissionServiceBean permissionService;
@EJB
DataverseServiceBean dataverseService;
@EJB
DatasetFieldServiceBean fieldService;
@EJB
VariableServiceBean variableService;
@EJB
IngestServiceBean ingestService;
@EJB
ForeignMetadataImportServiceBean metadataImportService;
@EJB
EjbDataverseEngine commandEngine;
@Inject
DataverseSession session;
@EJB
UserNotificationServiceBean userNotificationService;
@EJB
MapLayerMetadataServiceBean mapLayerMetadataService;
@EJB
BuiltinUserServiceBean builtinUserService;
@EJB
DataverseFieldTypeInputLevelServiceBean dataverseFieldTypeInputLevelService;
@EJB
SettingsServiceBean settingsService;
@EJB
AuthenticationServiceBean authService;
@EJB
SystemConfig systemConfig;
@EJB
GuestbookResponseServiceBean guestbookResponseService;
@EJB
FileDownloadServiceBean fileDownloadService;
@EJB
DataverseLinkingServiceBean dvLinkingService;
@EJB
DatasetLinkingServiceBean dsLinkingService;
@EJB
SearchFilesServiceBean searchFilesService;
@EJB
DataverseRoleServiceBean dataverseRoleService;
@EJB
PrivateUrlServiceBean privateUrlService;
@EJB
ExternalToolServiceBean externalToolService;
@Inject
DataverseRequestServiceBean dvRequestService;
@Inject
DatasetVersionUI datasetVersionUI;
@Inject
PermissionsWrapper permissionsWrapper;
@Inject
FileDownloadHelper fileDownloadHelper;
@Inject
WorldMapPermissionHelper worldMapPermissionHelper;
@Inject
ThumbnailServiceWrapper thumbnailServiceWrapper;
@Inject
SettingsWrapper settingsWrapper;
@Inject
ProvPopupFragmentBean provPopupFragmentBean;
private Dataset dataset = new Dataset();
private EditMode editMode;
private boolean bulkFileDeleteInProgress = false;
private Long ownerId;
private Long versionId;
private int selectedTabIndex;
private List<DataFile> newFiles = new ArrayList<>();
private DatasetVersion workingVersion;
private int releaseRadio = 1;
private int deaccessionRadio = 0;
private int deaccessionReasonRadio = 0;
private String datasetNextMajorVersion = "1.0";
private String datasetNextMinorVersion = "";
private String dropBoxSelection = "";
private String deaccessionReasonText = "";
private String displayCitation;
private String deaccessionForwardURLFor = "";
private String showVersionList = "false";
private List<Template> dataverseTemplates = new ArrayList<>();
private Template defaultTemplate;
private Template selectedTemplate;
/**
* In the file listing, the page the user is on. This is zero-indexed so if
* the user clicks page 2 in the UI, this will be 1.
*/
private int filePaginatorPage;
private int rowsPerPage;
private String persistentId;
private String version;
private String protocol = "";
private String authority = "";
private String customFields="";
private boolean noDVsAtAll = false;
private boolean noDVsRemaining = false;
private boolean stateChanged = false;
private Long linkingDataverseId;
private List<SelectItem> linkingDVSelectItems;
private Dataverse linkingDataverse;
// Version tab lists
private List<DatasetVersion> versionTabList = new ArrayList<>();
private List<DatasetVersion> versionTabListForPostLoad = new ArrayList<>();
// Used to store results of permissions checks
private final Map<String, Boolean> datasetPermissionMap = new HashMap<>(); // { Permission human_name : Boolean }
private DataFile selectedDownloadFile;
private Long maxFileUploadSizeInBytes = null;
private String dataverseSiteUrl = "";
private boolean removeUnusedTags;
private Boolean hasRsyncScript = false;
private Boolean hasTabular = false;
List<ExternalTool> configureTools = new ArrayList<>();
List<ExternalTool> exploreTools = new ArrayList<>();
Map<Long, List<ExternalTool>> configureToolsByFileId = new HashMap<>();
Map<Long, List<ExternalTool>> exploreToolsByFileId = new HashMap<>();
public Boolean isHasRsyncScript() {
return hasRsyncScript;
}
public void setHasRsyncScript(Boolean hasRsyncScript) {
this.hasRsyncScript = hasRsyncScript;
}
/**
* The contents of the script.
*/
private String rsyncScript = "";
public String getRsyncScript() {
return rsyncScript;
}
public void setRsyncScript(String rsyncScript) {
this.rsyncScript = rsyncScript;
}
private String rsyncScriptFilename;
public String getRsyncScriptFilename() {
return rsyncScriptFilename;
}
private String thumbnailString = null;
// This is the Dataset-level thumbnail;
// it's either the thumbnail of the designated datafile,
// a scaled-down version of an uploaded "logo" file, or a randomly selected
// image datafile from this dataset.
public String getThumbnailString() {
// This method gets called 30 (!) times, just to load the page!
// - so let's cache that string the first time it's called.
if (thumbnailString != null) {
if ("".equals(thumbnailString)) {
return null;
}
return thumbnailString;
}
if (!readOnly) {
DatasetThumbnail datasetThumbnail = dataset.getDatasetThumbnail();
if (datasetThumbnail == null) {
thumbnailString = "";
return null;
}
if (datasetThumbnail.isFromDataFile()) {
if (!datasetThumbnail.getDataFile().equals(dataset.getThumbnailFile())) {
datasetService.assignDatasetThumbnailByNativeQuery(dataset, datasetThumbnail.getDataFile());
// refresh the dataset:
dataset = datasetService.find(dataset.getId());
}
}
thumbnailString = datasetThumbnail.getBase64image();
} else {
thumbnailString = thumbnailServiceWrapper.getDatasetCardImageAsBase64Url(dataset, workingVersion.getId(),!workingVersion.isDraft());
if (thumbnailString == null) {
thumbnailString = "";
return null;
}
}
return thumbnailString;
}
public void setThumbnailString(String thumbnailString) {
//Dummy method
}
public boolean isRemoveUnusedTags() {
return removeUnusedTags;
}
public void setRemoveUnusedTags(boolean removeUnusedTags) {
this.removeUnusedTags = removeUnusedTags;
}
private List<FileMetadata> fileMetadatas;
private String fileSortField;
private String fileSortOrder;
private LazyFileMetadataDataModel lazyModel;
public LazyFileMetadataDataModel getLazyModel() {
return lazyModel;
}
public void setLazyModel(LazyFileMetadataDataModel lazyModel) {
this.lazyModel = lazyModel;
}
public List<Entry<String,String>> getCartList() {
if (session.getUser() instanceof AuthenticatedUser) {
return ((AuthenticatedUser) session.getUser()).getCart().getContents();
}
return null;
}
public boolean checkCartForItem(String title, String persistentId) {
if (session.getUser() instanceof AuthenticatedUser) {
return ((AuthenticatedUser) session.getUser()).getCart().checkCartForItem(title, persistentId);
}
return false;
}
public void addItemtoCart(String title, String persistentId) throws Exception{
if (canComputeAllFiles(true)) {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().addItem(title, persistentId);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
}
public void removeCartItem(String title, String persistentId) throws Exception {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().removeItem(title, persistentId);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
public void clearCart() throws Exception {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().clear();
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
public boolean isCartEmpty() {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
return authUser.getCart().getContents().isEmpty();
}
return true;
}
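/**
 * Build the compute URL for the entire cart: the configured ComputeBaseUrl
 * plus "/multiparty?" and the persistent id URLs of all cart entries joined
 * by "&". Returns an empty string if the setting is missing or the user is
 * not authenticated.
 */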
public String getCartComputeUrl() {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
String url = settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl);
if (url == null) {
return "";
}
// url indicates that you are computing with multiple datasets
url += "/multiparty?";
List<Entry<String,String>> contents = authUser.getCart().getContents();
for (Entry<String,String> entry : contents) {
String persistentIdUrl = entry.getValue();
url += persistentIdUrl + "&";
}
return url.substring(0, url.length() - 1);
}
return "";
}
private String fileLabelSearchTerm;
public String getFileLabelSearchTerm() {
return fileLabelSearchTerm;
}
public void setFileLabelSearchTerm(String fileLabelSearchTerm) {
if (fileLabelSearchTerm != null) {
this.fileLabelSearchTerm = fileLabelSearchTerm.trim();
}
}
private List<FileMetadata> fileMetadatasSearch;
public List<FileMetadata> getFileMetadatasSearch() {
return fileMetadatasSearch;
}
public void setFileMetadatasSearch(List<FileMetadata> fileMetadatasSearch) {
this.fileMetadatasSearch = fileMetadatasSearch;
}
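/**
 * Refresh the file listing for the current search term: in read-only mode the
 * already-loaded, sorted FileMetadatas are filtered in memory; otherwise the
 * search is delegated to a database query against the working version.
 */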
public void updateFileSearch(){
logger.info("updating file search list");
if (readOnly) {
this.fileMetadatasSearch = selectFileMetadatasForDisplay(this.fileLabelSearchTerm);
} else {
this.fileMetadatasSearch = datafileService.findFileMetadataByDatasetVersionIdLabelSearchTerm(workingVersion.getId(), this.fileLabelSearchTerm, "", "");
}
}
private Long numberOfFilesToShow = (long) 25;
public Long getNumberOfFilesToShow() {
return numberOfFilesToShow;
}
public void setNumberOfFilesToShow(Long numberOfFilesToShow) {
this.numberOfFilesToShow = numberOfFilesToShow;
}
public void showAll(){
setNumberOfFilesToShow(Long.valueOf(fileMetadatasSearch.size()));
}
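/**
 * Filter the working version's sorted FileMetadatas by label search term.
 * With a non-empty term, the matching ids are looked up once in the database
 * and used to filter the in-memory list; with no term, the full sorted list
 * is returned.
 */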
private List<FileMetadata> selectFileMetadatasForDisplay(String searchTerm) {
Set<Long> searchResultsIdSet = null;
if (searchTerm != null && !searchTerm.equals("")) {
List<Integer> searchResultsIdList = datafileService.findFileMetadataIdsByDatasetVersionIdLabelSearchTerm(workingVersion.getId(), searchTerm, "", "");
searchResultsIdSet = new HashSet<>();
for (Integer id : searchResultsIdList) {
searchResultsIdSet.add(id.longValue());
}
}
List<FileMetadata> retList = new ArrayList<>();
for (FileMetadata fileMetadata : workingVersion.getFileMetadatasSorted()) {
if (searchResultsIdSet == null || searchResultsIdSet.contains(fileMetadata.getId())) {
retList.add(fileMetadata);
}
}
return retList;
}
/*
Save the setting locally so db isn't hit repeatedly
This may be "null", signifying unlimited upload file size
*/
public Long getMaxFileUploadSizeInBytes(){
return this.maxFileUploadSizeInBytes;
}
public boolean isUnlimitedUploadFileSize(){
if (this.maxFileUploadSizeInBytes == null){
return true;
}
return false;
}
public String getDataverseSiteUrl() {
return this.dataverseSiteUrl;
}
public void setDataverseSiteUrl(String dataverseSiteUrl) {
this.dataverseSiteUrl = dataverseSiteUrl;
}
public DataFile getInitialDataFile() {
if (workingVersion.getFileMetadatas() != null && workingVersion.getFileMetadatas().size() > 0) {
return workingVersion.getFileMetadatas().get(0).getDataFile();
}
return null;
}
public SwiftAccessIO getSwiftObject() {
try {
StorageIO<DataFile> storageIO = getInitialDataFile() == null ? null : getInitialDataFile().getStorageIO();
if (storageIO != null && storageIO instanceof SwiftAccessIO) {
return (SwiftAccessIO)storageIO;
} else {
logger.fine("DatasetPage: Failed to cast storageIO as SwiftAccessIO (most likely because storageIO is a FileAccessIO)");
}
} catch (IOException e) {
logger.fine("DatasetPage: Failed to get storageIO");
}
return null;
}
public String getSwiftContainerName() throws IOException {
SwiftAccessIO swiftObject = getSwiftObject();
try {
swiftObject.open();
return swiftObject.getSwiftContainerName();
} catch (Exception e){
logger.info("DatasetPage: Failed to open swift object");
}
return "";
}
public void setSwiftContainerName(String name){
}
//This function applies to an entire dataset
private boolean isSwiftStorage() {
//containers without datafiles will not be stored in swift storage
if (getInitialDataFile() != null){
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
//if any of the datafiles are stored in swift
if (fmd.getDataFile().getStorageIdentifier().startsWith("swift://")) {
return true;
}
}
}
return false;
}
//This function applies to a single datafile
private boolean isSwiftStorage(FileMetadata metadata){
return metadata.getDataFile().getStorageIdentifier().startsWith("swift://");
}
private Boolean showComputeButtonForDataset = null;
//This function applies to an entire dataset
public boolean showComputeButton() {
if (showComputeButtonForDataset != null) {
return showComputeButtonForDataset;
}
if (isSwiftStorage() && (settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) != null)) {
showComputeButtonForDataset = true;
} else {
showComputeButtonForDataset = false;
}
return showComputeButtonForDataset;
}
private Map<Long, Boolean> showComputeButtonForFile = new HashMap<>();
//this function applies to a single datafile
public boolean showComputeButton(FileMetadata metadata) {
Long fileId = metadata.getDataFile().getId();
if (fileId == null) {
return false;
}
if (showComputeButtonForFile.containsKey(fileId)) {
return showComputeButtonForFile.get(fileId);
}
boolean result = isSwiftStorage(metadata)
&& settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) != null;
showComputeButtonForFile.put(fileId, result);
return result;
}
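/**
 * Check that every file in the working version is downloadable by the current
 * user before allowing a whole-dataset compute. If any file is not, the
 * "computeInvalid" dialog is shown and false is returned. When not invoked
 * from the cart, a redirect to the compute URL is issued on success.
 */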
public boolean canComputeAllFiles(boolean isCartCompute){
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (!fileDownloadHelper.canDownloadFile(fmd)) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('computeInvalid').show()");
return false;
}
}
if (!isCartCompute) {
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(getComputeUrl());
} catch (IOException ioex) {
logger.warning("Failed to issue a redirect.");
}
}
return true;
}
/*
in getComputeUrl(), we are sending the container/dataset name and the expiry and signature
for the temporary url of only ONE datafile within the dataset. This is because in the
ceph version of swift, we are only able to generate the temporary url for a single object
within a container.
Ideally, we want a temporary url for an entire container/dataset, so perhaps this could instead
be handled on the compute environment end.
Additionally, we have to think about the implications this could have with dataset versioning,
since we currently store all files (even from old versions) in the same container.
--SF
*/
public String getComputeUrl() throws IOException {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId();
//WHEN we are able to get a temp url for a dataset
//return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?containerName=" + swiftObject.getSwiftContainerName() + "&temp_url_sig=" + swiftObject.getTempUrlSignature() + "&temp_url_expires=" + swiftObject.getTempUrlExpiry();
}
//For a single file
public String getComputeUrl(FileMetadata metadata) {
SwiftAccessIO swiftObject = null;
try {
StorageIO<DataFile> storageIO = metadata.getDataFile().getStorageIO();
if (storageIO != null && storageIO instanceof SwiftAccessIO) {
swiftObject = (SwiftAccessIO)storageIO;
swiftObject.open();
}
} catch (IOException e) {
logger.info("DatasetPage: Failed to get storageIO");
}
if (settingsWrapper.isTrueForKey(SettingsServiceBean.Key.PublicInstall, false)) {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId() + "=" + swiftObject.getSwiftFileName();
}
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId() + "=" + swiftObject.getSwiftFileName() + "&temp_url_sig=" + swiftObject.getTempUrlSignature() + "&temp_url_expires=" + swiftObject.getTempUrlExpiry();
}
public String getCloudEnvironmentName() {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.CloudEnvironmentName);
}
public DataFile getSelectedDownloadFile() {
return selectedDownloadFile;
}
public void setSelectedDownloadFile(DataFile selectedDownloadFile) {
this.selectedDownloadFile = selectedDownloadFile;
}
public List<DataFile> getNewFiles() {
return newFiles;
}
public void setNewFiles(List<DataFile> newFiles) {
this.newFiles = newFiles;
}
public Dataverse getLinkingDataverse() {
return linkingDataverse;
}
public void setLinkingDataverse(Dataverse linkingDataverse) {
this.linkingDataverse = linkingDataverse;
}
public List<SelectItem> getLinkingDVSelectItems() {
return linkingDVSelectItems;
}
public void setLinkingDVSelectItems(List<SelectItem> linkingDVSelectItems) {
this.linkingDVSelectItems = linkingDVSelectItems;
}
public Long getLinkingDataverseId() {
return linkingDataverseId;
}
public void setLinkingDataverseId(Long linkingDataverseId) {
this.linkingDataverseId = linkingDataverseId;
}
public void updateReleasedVersions(){
setReleasedVersionTabList(resetReleasedVersionTabList());
}
public void updateSelectedLinkingDV(ValueChangeEvent event) {
linkingDataverseId = (Long) event.getNewValue();
}
public boolean isNoDVsAtAll() {
return noDVsAtAll;
}
public void setNoDVsAtAll(boolean noDVsAtAll) {
this.noDVsAtAll = noDVsAtAll;
}
public boolean isNoDVsRemaining() {
return noDVsRemaining;
}
private Map<Long, String> datafileThumbnailsMap = new HashMap<>();
public boolean isThumbnailAvailable(FileMetadata fileMetadata) {
// new and optimized logic:
// - check download permission here (should be cached - so it's free!)
// - only then check if the thumbnail is available/exists.
// then cache the results!
Long dataFileId = fileMetadata.getDataFile().getId();
if (datafileThumbnailsMap.containsKey(dataFileId)) {
return !"".equals(datafileThumbnailsMap.get(dataFileId));
}
if (!FileUtil.isThumbnailSupported(fileMetadata.getDataFile())) {
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
if (!this.fileDownloadHelper.canDownloadFile(fileMetadata)) {
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
String thumbnailAsBase64 = ImageThumbConverter.getImageThumbnailAsBase64(fileMetadata.getDataFile(), ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
//if (datafileService.isThumbnailAvailable(fileMetadata.getDataFile())) {
if (!StringUtil.isEmpty(thumbnailAsBase64)) {
datafileThumbnailsMap.put(dataFileId, thumbnailAsBase64);
return true;
}
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
public String getDataFileThumbnailAsBase64(FileMetadata fileMetadata) {
return datafileThumbnailsMap.get(fileMetadata.getDataFile().getId());
}
// Another convenience method - to cache Update Permission on the dataset:
public boolean canUpdateDataset() {
return permissionsWrapper.canUpdateDataset(dvRequestService.getDataverseRequest(), this.dataset);
}
public boolean canPublishDataverse() {
return permissionsWrapper.canIssuePublishDataverseCommand(dataset.getOwner());
}
public boolean canViewUnpublishedDataset() {
return permissionsWrapper.canViewUnpublishedDataset( dvRequestService.getDataverseRequest(), dataset);
}
/*
* 4.2.1 optimization.
* HOWEVER, this doesn't appear to be saving us anything!
* i.e., it's just as cheap to use session.getUser().isAuthenticated()
* every time; it doesn't do any new db lookups.
*/
public boolean isSessionUserAuthenticated() {
return session.getUser().isAuthenticated();
}
/**
* For use in the Dataset page
* @return
*/
public boolean isSuperUser(){
if (!this.isSessionUserAuthenticated()){
return false;
}
if (this.session.getUser().isSuperuser()){
return true;
}
return false;
}
/*
TODO/OPTIMIZATION: This is still costing us N SELECT FROM GuestbookResponse queries,
where N is the number of files. This could of course be replaced by a query that'll
look up all N at once... Not sure if it's worth it; especially now that N
will always be 10, for the initial page load. -- L.A. 4.2.1
*/
public Long getGuestbookResponseCount(FileMetadata fileMetadata) {
return guestbookResponseService.getCountGuestbookResponsesByDataFileId(fileMetadata.getDataFile().getId());
}
/**
* Check Dataset related permissions
*
* @param permissionToCheck
* @return
*/
public boolean doesSessionUserHaveDataSetPermission(Permission permissionToCheck){
if (permissionToCheck == null){
return false;
}
String permName = permissionToCheck.getHumanName();
// Has this check already been done?
//
if (this.datasetPermissionMap.containsKey(permName)){
// Yes, return previous answer
return this.datasetPermissionMap.get(permName);
}
// Check the permission
//
boolean hasPermission = this.permissionService.userOn(this.session.getUser(), this.dataset).has(permissionToCheck);
// Save the permission
this.datasetPermissionMap.put(permName, hasPermission);
// return true/false
return hasPermission;
}
public void setNoDVsRemaining(boolean noDVsRemaining) {
this.noDVsRemaining = noDVsRemaining;
}
private final Map<Long, MapLayerMetadata> mapLayerMetadataLookup = new HashMap<>();
private GuestbookResponse guestbookResponse;
private Guestbook selectedGuestbook;
public GuestbookResponse getGuestbookResponse() {
return guestbookResponse;
}
public void setGuestbookResponse(GuestbookResponse guestbookResponse) {
this.guestbookResponse = guestbookResponse;
}
public Guestbook getSelectedGuestbook() {
return selectedGuestbook;
}
public void setSelectedGuestbook(Guestbook selectedGuestbook) {
this.selectedGuestbook = selectedGuestbook;
}
public void viewSelectedGuestbook(Guestbook selectedGuestbook) {
this.selectedGuestbook = selectedGuestbook;
}
public void reset() {
dataset.setGuestbook(null);
}
public int getFilePaginatorPage() {
return filePaginatorPage;
}
public void setFilePaginatorPage(int filePaginatorPage) {
this.filePaginatorPage = filePaginatorPage;
}
public int getRowsPerPage() {
return rowsPerPage;
}
public void setRowsPerPage(int rowsPerPage) {
this.rowsPerPage = rowsPerPage;
}
public String getGlobalId() {
return persistentId;
}
public String getPersistentId() {
return persistentId;
}
public void setPersistentId(String persistentId) {
this.persistentId = persistentId;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getShowVersionList() {
return showVersionList;
}
public void setShowVersionList(String showVersionList) {
this.showVersionList = showVersionList;
}
public String getShowOtherText() {
return showOtherText;
}
public void setShowOtherText(String showOtherText) {
this.showOtherText = showOtherText;
}
private String showOtherText = "false";
public String getDeaccessionForwardURLFor() {
return deaccessionForwardURLFor;
}
public void setDeaccessionForwardURLFor(String deaccessionForwardURLFor) {
this.deaccessionForwardURLFor = deaccessionForwardURLFor;
}
private DatasetVersionDifference datasetVersionDifference;
public String getDeaccessionReasonText() {
return deaccessionReasonText;
}
public void setDeaccessionReasonText(String deaccessionReasonText) {
this.deaccessionReasonText = deaccessionReasonText;
}
public String getDisplayCitation() {
//displayCitation = dataset.getCitation(false, workingVersion);
return displayCitation;
}
public void setDisplayCitation(String displayCitation) {
this.displayCitation = displayCitation;
}
public String getDropBoxSelection() {
return dropBoxSelection;
}
public String getDropBoxKey() {
// Site-specific DropBox application registration key is configured
// via a JVM option under glassfish.
//if (true)return "some-test-key"; // for debugging
String configuredDropBoxKey = System.getProperty("dataverse.dropbox.key");
if (configuredDropBoxKey != null) {
return configuredDropBoxKey;
}
return "";
}
public void setDropBoxSelection(String dropBoxSelection) {
this.dropBoxSelection = dropBoxSelection;
}
public Dataset getDataset() {
return dataset;
}
public void setDataset(Dataset dataset) {
this.dataset = dataset;
}
public DatasetVersion getWorkingVersion() {
return workingVersion;
}
public EditMode getEditMode() {
return editMode;
}
public void setEditMode(EditMode editMode) {
this.editMode = editMode;
}
public Long getOwnerId() {
return ownerId;
}
public void setOwnerId(Long ownerId) {
this.ownerId = ownerId;
}
public Long getVersionId() {
return versionId;
}
public void setVersionId(Long versionId) {
this.versionId = versionId;
}
public int getSelectedTabIndex() {
return selectedTabIndex;
}
public void setSelectedTabIndex(int selectedTabIndex) {
this.selectedTabIndex = selectedTabIndex;
}
public int getReleaseRadio() {
return releaseRadio;
}
public void setReleaseRadio(int releaseRadio) {
this.releaseRadio = releaseRadio;
}
public String getDatasetNextMajorVersion() {
return datasetNextMajorVersion;
}
public void setDatasetNextMajorVersion(String datasetNextMajorVersion) {
this.datasetNextMajorVersion = datasetNextMajorVersion;
}
public String getDatasetNextMinorVersion() {
return datasetNextMinorVersion;
}
public void setDatasetNextMinorVersion(String datasetNextMinorVersion) {
this.datasetNextMinorVersion = datasetNextMinorVersion;
}
public int getDeaccessionReasonRadio() {
return deaccessionReasonRadio;
}
public void setDeaccessionReasonRadio(int deaccessionReasonRadio) {
this.deaccessionReasonRadio = deaccessionReasonRadio;
}
public int getDeaccessionRadio() {
return deaccessionRadio;
}
public void setDeaccessionRadio(int deaccessionRadio) {
this.deaccessionRadio = deaccessionRadio;
}
public List<Template> getDataverseTemplates() {
return dataverseTemplates;
}
public void setDataverseTemplates(List<Template> dataverseTemplates) {
this.dataverseTemplates = dataverseTemplates;
}
public Template getDefaultTemplate() {
return defaultTemplate;
}
public void setDefaultTemplate(Template defaultTemplate) {
this.defaultTemplate = defaultTemplate;
}
public Template getSelectedTemplate() {
return selectedTemplate;
}
public void setSelectedTemplate(Template selectedTemplate) {
this.selectedTemplate = selectedTemplate;
}
public void updateSelectedTemplate(ValueChangeEvent event) {
selectedTemplate = (Template) event.getNewValue();
if (selectedTemplate != null) {
//then create new working version from the selected template
workingVersion.updateDefaultValuesFromTemplate(selectedTemplate);
updateDatasetFieldInputLevels();
} else {
workingVersion.initDefaultValues();
updateDatasetFieldInputLevels();
}
resetVersionUI();
}
/*
// Original
private void updateDatasetFieldInputLevels() {
Long dvIdForInputLevel = ownerId;
if (!dataverseService.find(ownerId).isMetadataBlockRoot()) {
dvIdForInputLevel = dataverseService.find(ownerId).getMetadataRootId();
}
for (DatasetField dsf : workingVersion.getFlatDatasetFields()) {
DataverseFieldTypeInputLevel dsfIl = dataverseFieldTypeInputLevelService.findByDataverseIdDatasetFieldTypeId(dvIdForInputLevel, dsf.getDatasetFieldType().getId());
if (dsfIl != null) {
dsf.setInclude(dsfIl.isInclude());
} else {
dsf.setInclude(true);
}
}
}*/
/***
*
* Note: Updated to retrieve DataverseFieldTypeInputLevel objects in single query
*
*/
private void updateDatasetFieldInputLevels() {
Long dvIdForInputLevel = ownerId;
// OPTIMIZATION (?): replaced "dataverseService.find(ownerId)" with
// simply dataset.getOwner()... saves us a few lookups.
// TODO: could there possibly be any reason we want to look this
// dataverse up by the id here?? -- L.A. 4.2.1
if (!dataset.getOwner().isMetadataBlockRoot()) {
dvIdForInputLevel = dataset.getOwner().getMetadataRootId();
}
/* ---------------------------------------------------------
Map to hold DatasetFields
Format: { DatasetFieldType.id : DatasetField }
--------------------------------------------------------- */
// Initialize Map
Map<Long, DatasetField> mapDatasetFields = new HashMap<>();
// Populate Map
for (DatasetField dsf : workingVersion.getFlatDatasetFields()) {
if (dsf.getDatasetFieldType().getId() != null){
mapDatasetFields.put(dsf.getDatasetFieldType().getId(), dsf);
}
}
/* ---------------------------------------------------------
Retrieve List of DataverseFieldTypeInputLevel objects
Use the DatasetFieldType id's which are the Map's keys
--------------------------------------------------------- */
List<Long> idList = new ArrayList<>(mapDatasetFields.keySet());
List<DataverseFieldTypeInputLevel> dsFieldTypeInputLevels = dataverseFieldTypeInputLevelService.findByDataverseIdAndDatasetFieldTypeIdList(dvIdForInputLevel, idList);
/* ---------------------------------------------------------
Iterate through List of DataverseFieldTypeInputLevel objects
Call "setInclude" on its related DatasetField object
--------------------------------------------------------- */
for (DataverseFieldTypeInputLevel oneDSFieldTypeInputLevel : dsFieldTypeInputLevels){
if (oneDSFieldTypeInputLevel != null) {
// Is the DatasetField in the hash? hash format: { DatasetFieldType.id : DatasetField }
DatasetField dsf = mapDatasetFields.get(oneDSFieldTypeInputLevel.getDatasetFieldType().getId());
if (dsf != null){
// Yes, call "setInclude"
dsf.setInclude(oneDSFieldTypeInputLevel.isInclude());
// remove from hash
mapDatasetFields.remove(oneDSFieldTypeInputLevel.getDatasetFieldType().getId());
}
}
} // end: iterate through DataverseFieldTypeInputLevel objects
/* ---------------------------------------------------------
Iterate through any DatasetField objects remaining in the hash
Call "setInclude(true) on each one
--------------------------------------------------------- */
for ( DatasetField dsf : mapDatasetFields.values()) {
if (dsf != null){
dsf.setInclude(true);
}
}
}
public void handleChange() {
logger.fine("handle change");
logger.fine("new value " + selectedTemplate.getId());
}
public void handleChangeButton() {
}
public boolean isShapefileType(FileMetadata fm) {
if (fm == null) {
return false;
}
if (fm.getDataFile() == null) {
return false;
}
return fm.getDataFile().isShapefileType();
}
/*
Check if the FileMetadata.dataFile has an associated MapLayerMetadata object
The MapLayerMetadata objects have been fetched at page inception by "loadMapLayerMetadataLookup()"
*/
public boolean hasMapLayerMetadata(FileMetadata fm) {
if (fm == null) {
return false;
}
if (fm.getDataFile() == null) {
return false;
}
return doesDataFileHaveMapLayerMetadata(fm.getDataFile());
}
/**
* Check if a DataFile has an associated MapLayerMetadata object
*
* The MapLayerMetadata objects have been fetched at page inception by
* "loadMapLayerMetadataLookup()"
*/
private boolean doesDataFileHaveMapLayerMetadata(DataFile df) {
if (df == null) {
return false;
}
if (df.getId() == null) {
return false;
}
return this.mapLayerMetadataLookup.containsKey(df.getId());
}
/**
* Using a DataFile id, retrieve an associated MapLayerMetadata object
*
* The MapLayerMetadata objects have been fetched at page inception by
* "loadMapLayerMetadataLookup()"
*/
public MapLayerMetadata getMapLayerMetadata(DataFile df) {
if (df == null) {
return null;
}
return this.mapLayerMetadataLookup.get(df.getId());
}
private void msg(String s){
// System.out.println(s);
}
/**
* Create a hashmap consisting of { DataFile.id : MapLayerMetadata object}
*
* Very few DataFiles will have associated MapLayerMetadata objects so only
* use 1 query to get them
*/
private void loadMapLayerMetadataLookup() {
if (this.dataset == null) {
return;
}
if (this.dataset.getId() == null) {
return;
}
List<MapLayerMetadata> mapLayerMetadataList = mapLayerMetadataService.getMapLayerMetadataForDataset(this.dataset);
if (mapLayerMetadataList == null) {
return;
}
for (MapLayerMetadata layer_metadata : mapLayerMetadataList) {
mapLayerMetadataLookup.put(layer_metadata.getDataFile().getId(), layer_metadata);
}
}// A DataFile may have a related MapLayerMetadata object
private List<FileMetadata> displayFileMetadata;
public List<FileMetadata> getDisplayFileMetadata() {
return displayFileMetadata;
}
public void setDisplayFileMetadata(List<FileMetadata> displayFileMetadata) {
this.displayFileMetadata = displayFileMetadata;
}
private boolean readOnly = true;
public String init() {
return init(true);
}
public String initCitation() {
return init(false);
}
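/**
 * Shared initialization for the page. In view mode (dataset id, version id or
 * persistent id present) it resolves the dataset and working version, handles
 * harvested datasets, permissions and locks; in create mode (ownerId present)
 * it prepares a new dataset from the owning dataverse's templates. initFull
 * controls whether the full file listing and related helpers are loaded.
 */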
private String init(boolean initFull) {
//System.out.println("_YE_OLDE_QUERY_COUNTER_"); // for debug purposes
this.maxFileUploadSizeInBytes = systemConfig.getMaxFileUploadSize();
setDataverseSiteUrl(systemConfig.getDataverseSiteUrl());
guestbookResponse = new GuestbookResponse();
String nonNullDefaultIfKeyNotFound = "";
protocol = settingsWrapper.getValueForKey(SettingsServiceBean.Key.Protocol, nonNullDefaultIfKeyNotFound);
authority = settingsWrapper.getValueForKey(SettingsServiceBean.Key.Authority, nonNullDefaultIfKeyNotFound);
if (dataset.getId() != null || versionId != null || persistentId != null) { // view mode for a dataset
DatasetVersionServiceBean.RetrieveDatasetVersionResponse retrieveDatasetVersionResponse = null;
// ---------------------------------------
// Set the workingVersion and Dataset
// ---------------------------------------
if (persistentId != null) {
logger.fine("initializing DatasetPage with persistent ID " + persistentId);
// Set Working Version and Dataset by PersistentID
dataset = datasetService.findByGlobalId(persistentId);
if (dataset == null) {
logger.warning("No such dataset: "+persistentId);
return permissionsWrapper.notFound();
}
logger.fine("retrieved dataset, id="+dataset.getId());
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByPersistentId(persistentId, version);
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
logger.fine("retrieved version: id: " + workingVersion.getId() + ", state: " + this.workingVersion.getVersionState());
} else if (dataset.getId() != null) {
// Set Working Version and Dataset by Dataset Id and Version
Long datasetId = dataset.getId();
dataset = datasetService.find(datasetId);
if (dataset == null) {
logger.warning("No such dataset: " + datasetId);
return permissionsWrapper.notFound();
}
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionById(dataset.getId(), version);
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
logger.info("retreived version: id: " + workingVersion.getId() + ", state: " + this.workingVersion.getVersionState());
} else if (versionId != null) {
// TODO: 4.2.1 - this method is broken as of now!
// Set Working Version and Dataset by DatasetVersion Id
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByVersionId(versionId);
}
if (retrieveDatasetVersionResponse == null) {
return permissionsWrapper.notFound();
}
//this.dataset = this.workingVersion.getDataset();
// end: Set the workingVersion and Dataset
// ---------------------------------------
// Is the DatasetVersion or Dataset null?
//
if (workingVersion == null || this.dataset == null) {
return permissionsWrapper.notFound();
}
// Is the Dataset harvested?
if (dataset.isHarvested()) {
// if so, we'll simply forward to the remote URL for the original
// source of this harvested dataset:
String originalSourceURL = dataset.getRemoteArchiveURL();
if (originalSourceURL != null && !originalSourceURL.equals("")) {
logger.fine("redirecting to "+originalSourceURL);
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(originalSourceURL);
} catch (IOException ioex) {
// must be a bad URL...
// we don't need to do anything special here - we'll redirect
// to the local 404 page, below.
logger.warning("failed to issue a redirect to "+originalSourceURL);
}
return originalSourceURL;
}
return permissionsWrapper.notFound();
}
// Check permissions
if (!(workingVersion.isReleased() || workingVersion.isDeaccessioned()) && !this.canViewUnpublishedDataset()) {
return permissionsWrapper.notAuthorized();
}
if (!retrieveDatasetVersionResponse.wasRequestedVersionRetrieved()) {
//msg("checkit " + retrieveDatasetVersionResponse.getDifferentVersionMessage());
JsfHelper.addWarningMessage(retrieveDatasetVersionResponse.getDifferentVersionMessage());//JH.localize("dataset.message.metadataSuccess"));
}
// init the citation
displayCitation = dataset.getCitation(true, workingVersion);
if (initFull) {
// init the list of FileMetadatas
if (workingVersion.isDraft() && canUpdateDataset()) {
readOnly = false;
} else {
// an attempt to retrieve both the filemetadatas and datafiles early on, so that
// we don't have to do so later (possibly, many more times than necessary):
datafileService.findFileMetadataOptimizedExperimental(dataset);
}
fileMetadatasSearch = workingVersion.getFileMetadatasSorted();
ownerId = dataset.getOwner().getId();
datasetNextMajorVersion = this.dataset.getNextMajorVersionString();
datasetNextMinorVersion = this.dataset.getNextMinorVersionString();
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, false);
updateDatasetFieldInputLevels();
setExistReleasedVersion(resetExistRealeaseVersion());
//moving setVersionTabList to tab change event
//setVersionTabList(resetVersionTabList());
//setReleasedVersionTabList(resetReleasedVersionTabList());
//SEK - lazymodel may be needed for datascroller in future release
// lazyModel = new LazyFileMetadataDataModel(workingVersion.getId(), datafileService );
// populate MapLayerMetadata
this.loadMapLayerMetadataLookup(); // A DataFile may have a related MapLayerMetadata object
this.guestbookResponse = guestbookResponseService.initGuestbookResponseForFragment(workingVersion, null, session);
this.getFileDownloadHelper().setGuestbookResponse(guestbookResponse);
logger.fine("Checking if rsync support is enabled.");
if (DataCaptureModuleUtil.rsyncSupportEnabled(settingsWrapper.getValueForKey(SettingsServiceBean.Key.UploadMethods))) {
try {
ScriptRequestResponse scriptRequestResponse = commandEngine.submit(new RequestRsyncScriptCommand(dvRequestService.getDataverseRequest(), dataset));
logger.fine("script: " + scriptRequestResponse.getScript());
if(scriptRequestResponse.getScript()!=null && !scriptRequestResponse.getScript().isEmpty()){
setHasRsyncScript(true);
setRsyncScript(scriptRequestResponse.getScript());
rsyncScriptFilename = "upload-"+ workingVersion.getDataset().getIdentifier() + ".bash";
rsyncScriptFilename = rsyncScriptFilename.replace("/", "_");
}
else{
setHasRsyncScript(false);
}
} catch (RuntimeException ex) {
logger.warning("Problem getting rsync script: " + ex.getLocalizedMessage());
} catch (CommandException cex) {
logger.warning("Problem getting rsync script (Command Exception): " + cex.getLocalizedMessage());
}
}
}
} else if (ownerId != null) {
// create mode for a new child dataset
readOnly = false;
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setProtocol(protocol);
dataset.setAuthority(authority);
//Wait until the create command before actually getting an identifier
if (dataset.getOwner() == null) {
return permissionsWrapper.notFound();
} else if (!permissionService.on(dataset.getOwner()).has(Permission.AddDataset)) {
return permissionsWrapper.notAuthorized();
}
dataverseTemplates.addAll(dataverseService.find(ownerId).getTemplates());
if (!dataverseService.find(ownerId).isTemplateRoot()) {
dataverseTemplates.addAll(dataverseService.find(ownerId).getParentTemplates());
}
Collections.sort(dataverseTemplates, (Template t1, Template t2) -> t1.getName().compareToIgnoreCase(t2.getName()));
defaultTemplate = dataverseService.find(ownerId).getDefaultTemplate();
if (defaultTemplate != null) {
selectedTemplate = defaultTemplate;
for (Template testT : dataverseTemplates) {
if (defaultTemplate.getId().equals(testT.getId())) {
selectedTemplate = testT;
}
}
workingVersion = dataset.getEditVersion(selectedTemplate);
updateDatasetFieldInputLevels();
} else {
workingVersion = dataset.getCreateVersion();
updateDatasetFieldInputLevels();
}
if (settingsWrapper.isTrueForKey(SettingsServiceBean.Key.PublicInstall, false)){
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.message.publicInstall"));
}
resetVersionUI();
// FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Add New Dataset", " - Enter metadata to create the dataset's citation. You can add more metadata about this dataset after it's created."));
} else {
return permissionsWrapper.notFound();
}
try {
privateUrl = commandEngine.submit(new GetPrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
if (privateUrl != null) {
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageAuthor", Arrays.asList(getPrivateUrlLink(privateUrl))));
}
} catch (CommandException ex) {
// No big deal. The user simply doesn't have access to create or delete a Private URL.
}
if (session.getUser() instanceof PrivateUrlUser) {
PrivateUrlUser privateUrlUser = (PrivateUrlUser) session.getUser();
if (dataset != null && dataset.getId().equals(privateUrlUser.getDatasetId())) {
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageReviewer"));
}
}
// Various info messages, when the dataset is locked (for various reasons):
if (dataset.isLocked()) {
if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"),
BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
if (dataset.isLockedFor(DatasetLock.Reason.InReview)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.inReview.message"),
BundleUtil.getStringFromBundle("dataset.inreview.infoMessage"));
}
if (dataset.isLockedFor(DatasetLock.Reason.DcmUpload)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"),
BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details"));
lockedDueToDcmUpload = true;
}
//This is a hack to remove dataset locks for File PID registration if
//the dataset is released
//in testing we had cases where datasets with 1000 files were remaining locked after being published successfully
/*if(dataset.getLatestVersion().isReleased() && dataset.isLockedFor(DatasetLock.Reason.pidRegister)){
datasetService.removeDatasetLocks(dataset.getId(), DatasetLock.Reason.pidRegister);
}*/
if (dataset.isLockedFor(DatasetLock.Reason.pidRegister)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.pidRegister.workflow.inprogress"),
BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
}
for(DataFile f : dataset.getFiles()) {
if(f.isTabularData()) {
hasTabular = true;
break;
}
}
configureTools = externalToolService.findByType(ExternalTool.Type.CONFIGURE);
exploreTools = externalToolService.findByType(ExternalTool.Type.EXPLORE);
rowsPerPage = 10;
return null;
}
public boolean isHasTabular() {
return hasTabular;
}
public boolean isReadOnly() {
return readOnly;
}
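/**
 * Re-initialize the version UI and, for authenticated users on create,
 * pre-populate the depositor, date of deposit, dataset contact and author
 * fields (including the ORCID id, when available) from the session user.
 */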
private void resetVersionUI() {
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, true);
if (isSessionUserAuthenticated()) {
AuthenticatedUser au = (AuthenticatedUser) session.getUser();
//On create set pre-populated fields
for (DatasetField dsf : dataset.getEditVersion().getDatasetFields()) {
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.depositor) && dsf.isEmpty()) {
dsf.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.dateOfDeposit) && dsf.isEmpty()) {
dsf.getDatasetFieldValues().get(0).setValue(new SimpleDateFormat("yyyy-MM-dd").format(new Timestamp(new Date().getTime())));
}
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContact) && dsf.isEmpty()) {
for (DatasetFieldCompoundValue contactValue : dsf.getDatasetFieldCompoundValues()) {
for (DatasetField subField : contactValue.getChildDatasetFields()) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactName)) {
subField.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactAffiliation)) {
subField.getDatasetFieldValues().get(0).setValue(au.getAffiliation());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactEmail)) {
subField.getDatasetFieldValues().get(0).setValue(au.getEmail());
}
}
}
}
String creatorOrcidId = au.getOrcidId();
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.author) && dsf.isEmpty()) {
for (DatasetFieldCompoundValue authorValue : dsf.getDatasetFieldCompoundValues()) {
for (DatasetField subField : authorValue.getChildDatasetFields()) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorName)) {
subField.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorAffiliation)) {
subField.getDatasetFieldValues().get(0).setValue(au.getAffiliation());
}
if (creatorOrcidId != null) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorIdValue)) {
subField.getDatasetFieldValues().get(0).setValue(creatorOrcidId);
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorIdType)) {
DatasetFieldType authorIdTypeDatasetField = fieldService.findByName(DatasetFieldConstant.authorIdType);
subField.setSingleControlledVocabularyValue(fieldService.findControlledVocabularyValueByDatasetFieldTypeAndStrValue(authorIdTypeDatasetField, "ORCID", true));
}
}
}
}
}
}
}
}
private boolean bulkUpdateCheckVersion(){
return workingVersion.isReleased();
}
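/**
 * Re-map the current file selection onto the edit version so that bulk
 * operations act on managed FileMetadatas, preserving the terms of access
 * and the file access request flag of the version being displayed.
 */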
private void refreshSelectedFiles(){
if (readOnly) {
dataset = datasetService.find(dataset.getId());
}
String termsOfAccess = workingVersion.getTermsOfUseAndAccess().getTermsOfAccess();
boolean requestAccess = workingVersion.getTermsOfUseAndAccess().isFileAccessRequest();
workingVersion = dataset.getEditVersion();
workingVersion.getTermsOfUseAndAccess().setTermsOfAccess(termsOfAccess);
workingVersion.getTermsOfUseAndAccess().setFileAccessRequest(requestAccess);
List <FileMetadata> newSelectedFiles = new ArrayList<>();
for (FileMetadata fmd : selectedFiles){
for (FileMetadata fmdn: workingVersion.getFileMetadatas()){
if (fmd.getDataFile().equals(fmdn.getDataFile())){
newSelectedFiles.add(fmdn);
}
}
}
selectedFiles.clear();
for (FileMetadata fmdn : newSelectedFiles ){
selectedFiles.add(fmdn);
}
readOnly = false;
}
public void testSelectedFilesForMapData(){
setSelectedFilesHasMapLayer(false);
for (FileMetadata fmd : selectedFiles){
if(worldMapPermissionHelper.hasMapLayerMetadata(fmd)){
setSelectedFilesHasMapLayer(true);
return; //only need one for warning message
}
}
}
private boolean selectedFilesHasMapLayer;
public boolean isSelectedFilesHasMapLayer() {
return selectedFilesHasMapLayer;
}
public void setSelectedFilesHasMapLayer(boolean selectedFilesHasMapLayer) {
this.selectedFilesHasMapLayer = selectedFilesHasMapLayer;
}
private Integer chunkSize = 25;
public Integer getChunkSize() {
return chunkSize;
}
public void setChunkSize(Integer chunkSize) {
this.chunkSize = chunkSize;
}
public void viewAllButtonPress(){
setChunkSize(fileMetadatasSearch.size());
}
private int activeTabIndex;
public int getActiveTabIndex() {
return activeTabIndex;
}
public void setActiveTabIndex(int activeTabIndex) {
this.activeTabIndex = activeTabIndex;
}
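/**
 * Lazily populate the version tab lists when the Versions tab (index 3) is
 * selected; clear them otherwise, and re-run init() when returning to the
 * first tab.
 */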
public void tabChanged(TabChangeEvent event) {
TabView tv = (TabView) event.getComponent();
this.activeTabIndex = tv.getActiveIndex();
if (this.activeTabIndex == 3) {
setVersionTabList(resetVersionTabList());
setReleasedVersionTabList(resetReleasedVersionTabList());
} else {
releasedVersionTabList = new ArrayList<>();
versionTabList = new ArrayList<>();
if(this.activeTabIndex == 0) {
init();
}
}
}
public void edit(EditMode editMode) {
this.editMode = editMode;
if (this.readOnly) {
dataset = datasetService.find(dataset.getId());
}
workingVersion = dataset.getEditVersion();
if (editMode == EditMode.INFO) {
// ?
} else if (editMode == EditMode.FILE) {
// JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editFiles"));
// FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Upload + Edit Dataset Files", " - You can drag and drop your files from your desktop, directly into the upload widget."));
} else if (editMode.equals(EditMode.METADATA)) {
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, true);
updateDatasetFieldInputLevels();
JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editMetadata"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataset Metadata", " - Add more metadata about your dataset to help others easily find it."));
} else if (editMode.equals(EditMode.LICENSE)){
JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editTerms"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataset License and Terms", " - Update your dataset's license and terms of use."));
}
this.readOnly = false;
}
public String releaseDraft() {
if (releaseRadio == 1) {
return releaseDataset(true);
} else {
return releaseDataset(false);
}
}
public String releaseMajor() {
return releaseDataset(false);
}
public String sendBackToContributor() {
try {
//FIXME - Get Return Comment from sendBackToContributor popup
Command<Dataset> cmd = new ReturnDatasetToAuthorCommand(dvRequestService.getDataverseRequest(), dataset, "");
dataset = commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.reject.success"));
} catch (CommandException ex) {
String message = ex.getMessage();
logger.log(Level.SEVERE, "sendBackToContributor: {0}", message);
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.reject.failure", Collections.singletonList(message)));
}
/*
The notifications below are redundant, since the ReturnDatasetToAuthorCommand
sends them already. - L.A. Sep. 7 2017
List<AuthenticatedUser> authUsers = permissionService.getUsersWithPermissionOn(Permission.PublishDataset, dataset);
List<AuthenticatedUser> editUsers = permissionService.getUsersWithPermissionOn(Permission.EditDataset, dataset);
editUsers.removeAll(authUsers);
new HashSet<>(editUsers).forEach( au ->
userNotificationService.sendNotification(au, new Timestamp(new Date().getTime()),
UserNotification.Type.RETURNEDDS, dataset.getLatestVersion().getId())
);
*/
//FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, "DatasetSubmitted", "This dataset has been sent back to the contributor.");
//FacesContext.getCurrentInstance().addMessage(null, message);
return returnToLatestVersion();
}
public String submitDataset() {
try {
Command<Dataset> cmd = new SubmitDatasetForReviewCommand( dvRequestService.getDataverseRequest(), dataset);
dataset = commandEngine.submit(cmd);
//JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.submit.success"));
} catch (CommandException ex) {
String message = ex.getMessage();
logger.log(Level.SEVERE, "submitDataset: {0}", message);
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.submit.failure", Collections.singletonList(message)));
}
return returnToLatestVersion();
}
public String releaseParentDVAndDataset(){
releaseParentDV();
return releaseDataset(false);
}
public String releaseDataset() {
return releaseDataset(false);
}
private void releaseParentDV(){
if (session.getUser() instanceof AuthenticatedUser) {
PublishDataverseCommand cmd = new PublishDataverseCommand(dvRequestService.getDataverseRequest(), dataset.getOwner());
try {
commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(JH.localize("dataverse.publish.success"));
} catch (CommandException ex) {
logger.log(Level.SEVERE, "Unexpected Exception calling publish dataverse command", ex);
JsfHelper.addErrorMessage(JH.localize("dataverse.publish.failure"));
}
} else {
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataverse.notreleased") ,BundleUtil.getStringFromBundle( "dataverse.release.authenticatedUsersOnly"));
FacesContext.getCurrentInstance().addMessage(null, message);
}
}
public String deaccessionVersions() {
Command<DatasetVersion> cmd;
try {
if (selectedDeaccessionVersions == null) {
for (DatasetVersion dv : this.dataset.getVersions()) {
if (dv.isReleased()) {
DatasetVersion deaccession = datasetVersionService.find(dv.getId());
cmd = new DeaccessionDatasetVersionCommand(dvRequestService.getDataverseRequest(), setDatasetVersionDeaccessionReasonAndURL(deaccession), true);
DatasetVersion datasetv = commandEngine.submit(cmd);
}
}
} else {
for (DatasetVersion dv : selectedDeaccessionVersions) {
DatasetVersion deaccession = datasetVersionService.find(dv.getId());
cmd = new DeaccessionDatasetVersionCommand(dvRequestService.getDataverseRequest(), setDatasetVersionDeaccessionReasonAndURL(deaccession), false);
DatasetVersion datasetv = commandEngine.submit(cmd);
}
}
} catch (CommandException ex) {
logger.severe(ex.getMessage());
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deaccessionFailure"));
}
JsfHelper.addSuccessMessage(JH.localize("datasetVersion.message.deaccessionSuccess"));
return returnToDatasetOnly();
}
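/**
 * Translate the deaccession reason radio selection into a canonical version
 * note, append any free-text detail, and store the forwarding URL as the
 * archive note on the version being deaccessioned.
 */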
private DatasetVersion setDatasetVersionDeaccessionReasonAndURL(DatasetVersion dvIn) {
int deaccessionReasonCode = getDeaccessionReasonRadio();
String deacessionReasonDetail = getDeaccessionReasonText() != null ? ( getDeaccessionReasonText()).trim() : "";
switch (deaccessionReasonCode) {
case 1:
dvIn.setVersionNote("There is identifiable data in one or more files.");
break;
case 2:
dvIn.setVersionNote("The research article has been retracted.");
break;
case 3:
dvIn.setVersionNote("The dataset has been transferred to another repository.");
break;
case 4:
dvIn.setVersionNote("IRB request.");
break;
case 5:
dvIn.setVersionNote("Legal issue or Data Usage Agreement.");
break;
case 6:
dvIn.setVersionNote("Not a valid dataset.");
break;
case 7:
break;
}
if (!deacessionReasonDetail.isEmpty()){
if (!StringUtil.isEmpty(dvIn.getVersionNote())){
dvIn.setVersionNote(dvIn.getVersionNote() + " " + deacessionReasonDetail);
} else {
dvIn.setVersionNote(deacessionReasonDetail);
}
}
dvIn.setArchiveNote(getDeaccessionForwardURLFor());
return dvIn;
}
public boolean isMapLayerToBeDeletedOnPublish(){
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (worldMapPermissionHelper.hasMapLayerMetadata(fmd)){
if (fmd.isRestricted() || fmd.isRestrictedUI()){
return true;
}
}
}
return false;
}
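/**
 * Publish the dataset (major or minor release) on behalf of an authenticated
 * user. A success message is shown only if the PublishDatasetCommand actually
 * completed; if a publishing workflow is still in progress, a "locked"
 * warning is shown instead.
 */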
private String releaseDataset(boolean minor) {
if (session.getUser() instanceof AuthenticatedUser) {
try {
final PublishDatasetResult result = commandEngine.submit(
new PublishDatasetCommand(dataset, dvRequestService.getDataverseRequest(), minor)
);
dataset = result.getDataset();
// Successfully executing PublishDatasetCommand does not guarantee that the dataset
// has been published. If a publishing workflow is configured, this may have sent the
// dataset into a workflow limbo, potentially waiting for a third party system to complete
// the process. So it may be premature to show the "success" message at this point.
if ( result.isCompleted() ) {
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.message.publishSuccess"));
} else {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
} catch (CommandException ex) {
JsfHelper.addErrorMessage(ex.getLocalizedMessage());
logger.severe(ex.getMessage());
}
} else {
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.message.only.authenticatedUsers"));
}
return returnToDatasetOnly();
}
public String registerDataset() {
try {
UpdateDatasetVersionCommand cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest());
cmd.setValidateLenient(true);
dataset = commandEngine.submit(cmd);
} catch (CommandException ex) {
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_WARN,BundleUtil.getStringFromBundle( "dataset.registration.failed"), " - " + ex.toString()));
logger.severe(ex.getMessage());
}
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.registered"), BundleUtil.getStringFromBundle("dataset.registered.msg"));
FacesContext.getCurrentInstance().addMessage(null, message);
return returnToDatasetOnly();
}
public void refresh(ActionEvent e) {
refresh();
}
public void refresh() {
logger.fine("refreshing");
//dataset = datasetService.find(dataset.getId());
dataset = null;
logger.fine("refreshing working version");
DatasetVersionServiceBean.RetrieveDatasetVersionResponse retrieveDatasetVersionResponse = null;
if (persistentId != null) {
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByPersistentId(persistentId, version);
dataset = datasetService.findByGlobalId(persistentId);
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
} else if (versionId != null) {
retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByVersionId(versionId);
} else if (dataset.getId() != null) {
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionById(dataset.getId(), version);
dataset = datasetService.find(dataset.getId());
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
}
if (retrieveDatasetVersionResponse == null) {
// TODO:
// should probably redirect to the 404 page, if we can't find
// this version anymore.
// -- L.A. 4.2.3
return;
}
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
if (this.workingVersion == null) {
// TODO:
// same as the above
return;
}
if (dataset == null) {
// this would be the case if we were retrieving the version by
// the versionId, above.
this.dataset = this.workingVersion.getDataset();
}
if (readOnly) {
datafileService.findFileMetadataOptimizedExperimental(dataset);
}
fileMetadatasSearch = workingVersion.getFileMetadatasSorted();
displayCitation = dataset.getCitation(true, workingVersion);
stateChanged = false;
}
public String deleteDataset() {
DestroyDatasetCommand cmd;
try {
cmd = new DestroyDatasetCommand(dataset, dvRequestService.getDataverseRequest());
commandEngine.submit(cmd);
/* - need to figure out what to do
Update notification in Delete Dataset Method
for (UserNotification und : userNotificationService.findByDvObject(dataset.getId())){
userNotificationService.delete(und);
} */
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deleteFailure"));
logger.severe(ex.getMessage());
}
JsfHelper.addSuccessMessage(JH.localize("dataset.message.deleteSuccess"));
return "/dataverse.xhtml?alias=" + dataset.getOwner().getAlias() + "&faces-redirect=true";
}
public String editFileMetadata(){
// If there are no files selected, return an empty string - which
// means, do nothing, don't redirect anywhere, stay on this page.
// The dialogue telling the user to select at least one file will
// be shown to them by an onclick javascript method attached to the
// filemetadata edit button on the page.
// -- L.A. 4.2.1
if (this.selectedFiles == null || this.selectedFiles.size() < 1) {
return "";
}
return "/editdatafiles.xhtml?selectedFileIds=" + getSelectedFilesIdsString() + "&datasetId=" + dataset.getId() +"&faces-redirect=true";
}
public String deleteDatasetVersion() {
DeleteDatasetVersionCommand cmd;
try {
cmd = new DeleteDatasetVersionCommand(dvRequestService.getDataverseRequest(), dataset);
commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(JH.localize("datasetVersion.message.deleteSuccess"));
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deleteFailure"));
logger.severe(ex.getMessage());
}
return returnToDatasetOnly();
}
private List<FileMetadata> selectedFiles = new ArrayList<>();
public List<FileMetadata> getSelectedFiles() {
return selectedFiles;
}
public void setSelectedFiles(List<FileMetadata> selectedFiles) {
this.selectedFiles = selectedFiles;
}
private Dataverse selectedDataverseForLinking;
public Dataverse getSelectedDataverseForLinking() {
return selectedDataverseForLinking;
}
public void setSelectedDataverseForLinking(Dataverse sdvfl) {
this.selectedDataverseForLinking = sdvfl;
}
private List<FileMetadata> selectedRestrictedFiles; // = new ArrayList<>();
public List<FileMetadata> getSelectedRestrictedFiles() {
return selectedRestrictedFiles;
}
public void setSelectedRestrictedFiles(List<FileMetadata> selectedRestrictedFiles) {
this.selectedRestrictedFiles = selectedRestrictedFiles;
}
private List<FileMetadata> selectedUnrestrictedFiles; // = new ArrayList<>();
public List<FileMetadata> getSelectedUnrestrictedFiles() {
return selectedUnrestrictedFiles;
}
public void setSelectedUnrestrictedFiles(List<FileMetadata> selectedUnrestrictedFiles) {
this.selectedUnrestrictedFiles = selectedUnrestrictedFiles;
}
private List<FileMetadata> selectedDownloadableFiles;
public List<FileMetadata> getSelectedDownloadableFiles() {
return selectedDownloadableFiles;
}
public void setSelectedDownloadableFiles(List<FileMetadata> selectedDownloadableFiles) {
this.selectedDownloadableFiles = selectedDownloadableFiles;
}
private List<FileMetadata> selectedNonDownloadableFiles;
public List<FileMetadata> getSelectedNonDownloadableFiles() {
return selectedNonDownloadableFiles;
}
public void setSelectedNonDownloadableFiles(List<FileMetadata> selectedNonDownloadableFiles) {
this.selectedNonDownloadableFiles = selectedNonDownloadableFiles;
}
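    /**
     * Prepares a multiple-file download for the current selection: splits the
     * selected files into downloadable and non-downloadable lists, resets the
     * guestbook response, and then either starts the download, opens the
     * guestbook/terms popup, or shows the "invalid"/"mixed" selection popups.
     */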
public void validateFilesForDownload(boolean guestbookRequired, boolean downloadOriginal){
setSelectedDownloadableFiles(new ArrayList<>());
setSelectedNonDownloadableFiles(new ArrayList<>());
if (this.selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForDownload').show()");
return;
}
for (FileMetadata fmd : this.selectedFiles){
if(this.fileDownloadHelper.canDownloadFile(fmd)){
getSelectedDownloadableFiles().add(fmd);
} else {
getSelectedNonDownloadableFiles().add(fmd);
}
}
// If some of the files were restricted and we had to drop them off the
// list, and NONE of the files are left on the downloadable list
// - we show them a "you're out of luck" popup:
if(getSelectedDownloadableFiles().isEmpty() && !getSelectedNonDownloadableFiles().isEmpty()){
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadInvalid').show()");
return;
}
// Note that the GuestbookResponse object may still have information from
// the last download action performed by the user. For example, it may
// still have the non-null Datafile in it, if the user has just downloaded
// a single file; or it may still have the format set to "original" -
// even if that's not what they are trying to do now.
// So make sure to reset these values:
guestbookResponse.setDataFile(null);
guestbookResponse.setSelectedFileIds(getSelectedDownloadableFilesIdsString());
if (downloadOriginal) {
guestbookResponse.setFileFormat("original");
} else {
guestbookResponse.setFileFormat("");
}
guestbookResponse.setDownloadtype("Download");
// If we have a bunch of files that we can download, AND there were no files
// that we had to take off the list, because of permissions - we can
// either send the user directly to the download API (if no guestbook/terms
// popup is required), or send them to the download popup:
if(!getSelectedDownloadableFiles().isEmpty() && getSelectedNonDownloadableFiles().isEmpty()){
if (guestbookRequired){
openDownloadPopupForMultipleFileDownload();
} else {
startMultipleFileDownload();
}
return;
}
// ... and if some files were restricted, but some are downloadable,
// we are showing them this "you are somewhat in luck" popup; that will
// then direct them to the download, or popup, as needed:
if(!getSelectedDownloadableFiles().isEmpty() && !getSelectedNonDownloadableFiles().isEmpty()){
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadMixed').show()");
}
}
private boolean selectAllFiles;
public boolean isSelectAllFiles() {
return selectAllFiles;
}
public void setSelectAllFiles(boolean selectAllFiles) {
this.selectAllFiles = selectAllFiles;
}
public void toggleAllSelected(){
//This is here so that if the user selects all on the dataset page
// s/he will get all files on download
this.selectAllFiles = !this.selectAllFiles;
}
// helper Method
public String getSelectedFilesIdsString() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
// helper Method
public String getSelectedDownloadableFilesIdsString() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedDownloadableFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
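    /**
     * Re-partitions the current file selection into restricted and
     * unrestricted lists and records whether any tabular files are selected;
     * used, for example, when refreshing the tags popup (see refreshTagsPopUp()).
     */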
public void updateFileCounts(){
setSelectedUnrestrictedFiles(new ArrayList<>());
setSelectedRestrictedFiles(new ArrayList<>());
setTabularDataSelected(false);
for (FileMetadata fmd : this.selectedFiles){
if(fmd.isRestricted()){
getSelectedRestrictedFiles().add(fmd);
} else {
getSelectedUnrestrictedFiles().add(fmd);
}
if(fmd.getDataFile().isTabularData()){
setTabularDataSelected(true);
}
}
}
private List<String> getSuccessMessageArguments() {
List<String> arguments = new ArrayList<>();
String dataverseString = "";
arguments.add(StringEscapeUtils.escapeHtml(dataset.getDisplayName()));
dataverseString += " <a href=\"/dataverse/" + selectedDataverseForLinking.getAlias() + "\">" + StringEscapeUtils.escapeHtml(selectedDataverseForLinking.getDisplayName()) + "</a>";
arguments.add(dataverseString);
return arguments;
}
public void saveLinkingDataverses() {
if (selectedDataverseForLinking == null) {
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_ERROR, "", BundleUtil.getStringFromBundle("dataverse.link.select"));
FacesContext.getCurrentInstance().addMessage(null, message);
return;
}
if(saveLink(selectedDataverseForLinking)){
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.message.linkSuccess", getSuccessMessageArguments()));
} else{
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.notlinked"), linkingDataverseErrorMessage);
FacesContext.getCurrentInstance().addMessage(null, message);
}
}
private String linkingDataverseErrorMessage = "";
public String getLinkingDataverseErrorMessage() {
return linkingDataverseErrorMessage;
}
public void setLinkingDataverseErrorMessage(String linkingDataverseErrorMessage) {
this.linkingDataverseErrorMessage = linkingDataverseErrorMessage;
}
UIInput selectedLinkingDataverseMenu;
public UIInput getSelectedDataverseMenu() {
return selectedLinkingDataverseMenu;
}
public void setSelectedDataverseMenu(UIInput selectedDataverseMenu) {
this.selectedLinkingDataverseMenu = selectedDataverseMenu;
}
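    /**
     * Links this dataset to the given dataverse via the LinkDatasetCommand;
     * returns false (and records an error message for the page) if the
     * command fails.
     */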
private Boolean saveLink(Dataverse dataverse){
boolean retVal = true;
if (readOnly) {
// Pass a "real", non-readonly dataset to the LinkDatasetCommand:
dataset = datasetService.find(dataset.getId());
}
LinkDatasetCommand cmd = new LinkDatasetCommand(dvRequestService.getDataverseRequest(), dataverse, dataset);
linkingDataverse = dataverse;
try {
commandEngine.submit(cmd);
} catch (CommandException ex) {
String msg = "There was a problem linking this dataset to yours: " + ex;
logger.severe(msg);
msg = BundleUtil.getStringFromBundle("dataset.notlinked.msg") + ex;
/**
* @todo how do we get this message to show up in the GUI?
*/
linkingDataverseErrorMessage = msg;
retVal = false;
}
return retVal;
}
public List<Dataverse> completeLinkingDataverse(String query) {
dataset = datasetService.find(dataset.getId());
if (session.getUser().isAuthenticated()) {
return dataverseService.filterDataversesForLinking(query, dvRequestService.getDataverseRequest(), dataset);
} else {
return null;
}
}
List<FileMetadata> previouslyRestrictedFiles = null;
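    /**
     * Decides whether the terms-of-access popup needs to be shown on save:
     * true if any file in the working version is restricted and was not
     * already restricted before this edit (in CREATE mode every restricted
     * file counts as newly restricted).
     */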
public boolean isShowAccessPopup() {
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
if (editMode == EditMode.CREATE) {
// if this is a brand new file, it's definitely not
// of a previously restricted kind!
return true;
}
if (previouslyRestrictedFiles != null) {
// We've already checked whether we are in the CREATE mode,
// above; and that means we can safely assume this filemetadata
// has an existing db id. So it is safe to use the .contains()
// method below:
if (!previouslyRestrictedFiles.contains(fmd)) {
return true;
}
}
}
}
return false;
}
public void setShowAccessPopup(boolean showAccessPopup) {} // dummy set method
public String testSelectedFilesForRestrict(){
RequestContext requestContext = RequestContext.getCurrentInstance();
if (selectedFiles.isEmpty()) {
requestContext.execute("PF('selectFilesForRestrict').show()");
return "";
} else {
boolean validSelection = false;
for (FileMetadata fmd : selectedFiles) {
if (!fmd.isRestricted() ){
validSelection = true;
}
}
if (!validSelection) {
requestContext.execute("PF('selectFilesForRestrict').show()");
return "";
}
testSelectedFilesForMapData();
requestContext.execute("PF('accessPopup').show()");
return "";
}
}
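    /**
     * Restricts or unrestricts the currently selected files. Shows a
     * "select files" popup if nothing is selected, or if the selection
     * includes files already in the requested state; otherwise applies the
     * change and saves it to the draft version.
     */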
public String restrictSelectedFiles(boolean restricted) throws CommandException{
RequestContext requestContext = RequestContext.getCurrentInstance();
if (selectedFiles.isEmpty()) {
if (restricted) {
requestContext.execute("PF('selectFilesForRestrict').show()");
} else {
requestContext.execute("PF('selectFilesForUnRestrict').show()");
}
return "";
} else {
boolean validSelection = true;
for (FileMetadata fmd : selectedFiles) {
if ((fmd.isRestricted() && restricted) || (!fmd.isRestricted() && !restricted)) {
validSelection = false;
}
}
if (!validSelection) {
if (restricted) {
requestContext.execute("PF('selectFilesForRestrict').show()");
}
if (!restricted) {
requestContext.execute("PF('selectFilesForUnRestrict').show()");
}
return "";
}
}
if (editMode != EditMode.CREATE) {
if (bulkUpdateCheckVersion()) {
refreshSelectedFiles();
}
restrictFiles(restricted);
}
save();
return returnToDraftVersion();
}
public void restrictFiles(boolean restricted) throws CommandException {
//if (previouslyRestrictedFiles == null) {
// we don't need to bother with this "previously restricted" business
// when in Create mode... because all the files are new, so none could
// have been restricted previously;
// (well, it looks like the code below should never be called in the
// CREATE mode in the first place... the edit files fragment uses
// its own restrictFiles() method there; also, the fmd.getDataFile().equals(fmw.getDataFile()))
// line is not going to work on a new file... so be mindful of all this
// when the code between the 2 beans is merged in 4.3.
if (editMode != EditMode.CREATE) {
previouslyRestrictedFiles = new ArrayList<>();
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
previouslyRestrictedFiles.add(fmd);
}
}
Command cmd;
String fileNames = null;
for (FileMetadata fmw : workingVersion.getFileMetadatas()) {
for (FileMetadata fmd : this.getSelectedFiles()) {
if (restricted && !fmw.isRestricted()) {
// collect the names of the newly-restricted files,
// to show in the success message:
// I don't think this does the right thing:
// (adds too many files to the message; good thing this
// message isn't used, normally)
if (fileNames == null) {
fileNames = fmd.getLabel();
} else {
fileNames = fileNames.concat(fmd.getLabel());
}
}
if (fmd.getDataFile().equals(fmw.getDataFile())) {
cmd = new RestrictFileCommand(fmw.getDataFile(), dvRequestService.getDataverseRequest(), restricted);
commandEngine.submit(cmd);
// fmw.setRestricted(restricted);
// if (workingVersion.isDraft() && !fmw.getDataFile().isReleased()) {
// // We do not really need to check that the working version is
// // a draft here - it must be a draft, if we've gotten this
// // far. But just in case. -- L.A. 4.2.1
// fmw.getDataFile().setRestricted(restricted);
// }
}
}
}
if (fileNames != null) {
String successMessage = JH.localize("file.restricted.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", fileNames);
JsfHelper.addFlashMessage(successMessage);
}
}
}
public int getRestrictedFileCount() {
int restrictedFileCount = 0;
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
restrictedFileCount++;
}
}
return restrictedFileCount;
}
private List<FileMetadata> filesToBeDeleted = new ArrayList<>();
public String deleteFilesAndSave(){
bulkFileDeleteInProgress = true;
if (bulkUpdateCheckVersion()){
refreshSelectedFiles();
}
deleteFiles();
return save();
}
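    /**
     * Removes the selected files from the edit version. Metadata that already
     * exists in the database is queued in filesToBeDeleted, so the actual
     * delete happens inside the UpdateDatasetVersionCommand; brand-new draft
     * metadata is simply dropped from the version, since the file itself
     * stays in the published version.
     */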
public void deleteFiles() {
for (FileMetadata markedForDelete : selectedFiles) {
if (markedForDelete.getId() != null) {
// This FileMetadata has an id, i.e., it exists in the database.
// We are going to remove this filemetadata from the version:
dataset.getEditVersion().getFileMetadatas().remove(markedForDelete);
// But the actual delete will be handled inside the UpdateDatasetCommand
// (called later on). The list "filesToBeDeleted" is passed to the
// command as a parameter:
filesToBeDeleted.add(markedForDelete);
} else {
// This FileMetadata does not have an id, meaning it has just been
// created, and not yet saved in the database. This in turn means this is
// a freshly created DRAFT version; specifically created because
// the user is trying to delete a file from an existing published
// version. This means we are not really *deleting* the file -
// we are going to keep it in the published version; we are simply
// going to save a new DRAFT version that does not contain this file.
// So below we are deleting the metadata from the version; we are
// NOT adding the file to the filesToBeDeleted list that will be
// passed to the UpdateDatasetCommand. -- L.A. Aug 2017
Iterator<FileMetadata> fmit = dataset.getEditVersion().getFileMetadatas().iterator();
while (fmit.hasNext()) {
FileMetadata fmd = fmit.next();
if (markedForDelete.getDataFile().getStorageIdentifier().equals(fmd.getDataFile().getStorageIdentifier())) {
// And if this is an image file that happens to be assigned
// as the dataset thumbnail, let's null the assignment here:
if (fmd.getDataFile().equals(dataset.getThumbnailFile())) {
dataset.setThumbnailFile(null);
}
//if not published then delete identifier
if (!fmd.getDataFile().isReleased()){
try{
commandEngine.submit(new DeleteDataFileCommand(fmd.getDataFile(), dvRequestService.getDataverseRequest()));
} catch (CommandException e){
//this command is here to delete the identifier of unreleased files
//if it fails then a reserved identifier may still be present on the remote provider
}
}
fmit.remove();
break;
}
}
}
}
/*
Do note that if we are deleting any files that have UNFs (i.e.,
tabular files), we DO NEED TO RECALCULATE the UNF of the version!
- but we will do this inside the UpdateDatasetCommand.
*/
}
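    /**
     * Validates the working version and saves it with either the
     * CreateNewDatasetCommand (CREATE mode) or the UpdateDatasetVersionCommand.
     * In CREATE mode any newly uploaded files are saved and added via a second
     * update command. Finally, ingest jobs are queued, staged provenance data
     * is persisted (if enabled), and the user is redirected to the draft version.
     */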
public String save() {
//Before dataset saved, write cached prov freeform to version
if(systemConfig.isProvCollectionEnabled()) {
provPopupFragmentBean.saveStageProvFreeformToLatestVersion();
}
// Validate
Set<ConstraintViolation> constraintViolations = workingVersion.validate();
if (!constraintViolations.isEmpty()) {
//JsfHelper.addFlashMessage(JH.localize("dataset.message.validationError"));
JH.addMessage(FacesMessage.SEVERITY_ERROR, JH.localize("dataset.message.validationError"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Validation Error", "See below for details."));
return "";
}
// Use the Create or Update command to save the dataset:
Command<Dataset> cmd;
try {
if (editMode == EditMode.CREATE) {
if ( selectedTemplate != null ) {
if ( isSessionUserAuthenticated() ) {
cmd = new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest(), false, selectedTemplate);
} else {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.create.authenticatedUsersOnly"));
return null;
}
} else {
cmd = new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest());
}
} else {
cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest(), filesToBeDeleted);
((UpdateDatasetVersionCommand) cmd).setValidateLenient(true);
}
dataset = commandEngine.submit(cmd);
if (editMode == EditMode.CREATE) {
if (session.getUser() instanceof AuthenticatedUser) {
userNotificationService.sendNotification((AuthenticatedUser) session.getUser(), dataset.getCreateDate(), UserNotification.Type.CREATEDS, dataset.getLatestVersion().getId());
}
}
logger.fine("Successfully executed SaveDatasetCommand.");
} catch (EJBException ex) {
StringBuilder error = new StringBuilder();
error.append(ex).append(" ");
error.append(ex.getMessage()).append(" ");
Throwable cause = ex;
while (cause.getCause()!= null) {
cause = cause.getCause();
error.append(cause).append(" ");
error.append(cause.getMessage()).append(" ");
}
logger.log(Level.FINE, "Couldn''t save dataset: {0}", error.toString());
populateDatasetUpdateFailureMessage();
return returnToDraftVersion();
} catch (CommandException ex) {
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
logger.log(Level.SEVERE, "CommandException, when attempting to update the dataset: " + ex.getMessage(), ex);
populateDatasetUpdateFailureMessage();
return returnToDraftVersion();
}
if (editMode != null) {
if (editMode.equals(EditMode.CREATE)) {
// We allow users to upload files on Create:
int nNewFiles = newFiles.size();
logger.fine("NEW FILES: "+nNewFiles);
if (nNewFiles > 0) {
// Save the NEW files permanently and add them to the dataset:
// But first, fully refresh the newly created dataset (with a
// datasetService.find()).
// We have reasons to believe that the CreateDatasetCommand
// returns the dataset that doesn't have all the
// RoleAssignments properly linked to it - even though they
// have been created in the dataset.
dataset = datasetService.find(dataset.getId());
List<DataFile> filesAdded = ingestService.saveAndAddFilesToDataset(dataset.getEditVersion(), newFiles);
newFiles.clear();
// and another update command:
boolean addFilesSuccess = false;
cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest(), new ArrayList<FileMetadata>());
try {
dataset = commandEngine.submit(cmd);
addFilesSuccess = true;
} catch (Exception ex) {
addFilesSuccess = false;
}
if (addFilesSuccess && dataset.getFiles().size() > 0) {
if (nNewFiles == dataset.getFiles().size()) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.createSuccess"));
} else {
String partialSuccessMessage = JH.localize("dataset.message.createSuccess.partialSuccessSavingFiles");
partialSuccessMessage = partialSuccessMessage.replace("{0}", "" + dataset.getFiles().size() + "");
partialSuccessMessage = partialSuccessMessage.replace("{1}", "" + nNewFiles + "");
JsfHelper.addWarningMessage(partialSuccessMessage);
}
} else {
JsfHelper.addWarningMessage(JH.localize("dataset.message.createSuccess.failedToSaveFiles"));
}
} else {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.createSuccess"));
}
}
if (editMode.equals(EditMode.METADATA)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.metadataSuccess"));
}
if (editMode.equals(EditMode.LICENSE)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.termsSuccess"));
}
if (editMode.equals(EditMode.FILE)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.filesSuccess"));
}
} else {
// must have been a bulk file update or delete:
if (bulkFileDeleteInProgress) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.bulkFileDeleteSuccess"));
} else {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.bulkFileUpdateSuccess"));
}
}
editMode = null;
bulkFileDeleteInProgress = false;
// Call Ingest Service one more time, to
// queue the data ingest jobs for asynchronous execution:
ingestService.startIngestJobsForDataset(dataset, (AuthenticatedUser) session.getUser());
//After dataset saved, then persist prov json data
if(systemConfig.isProvCollectionEnabled()) {
try {
provPopupFragmentBean.saveStagedProvJson(false, dataset.getLatestVersion().getFileMetadatas());
} catch (AbstractApiBean.WrappedResponse ex) {
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("file.metadataTab.provenance.error"));
Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
}
}
logger.fine("Redirecting to the Dataset page.");
return returnToDraftVersion();
}
private void populateDatasetUpdateFailureMessage(){
if (editMode == null) {
// that must have been a bulk file update or delete:
if (bulkFileDeleteInProgress) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.bulkFileDeleteFailure"));
} else {
JsfHelper.addErrorMessage(JH.localize("dataset.message.filesFailure"));
}
} else {
if (editMode.equals(EditMode.CREATE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.createFailure"));
}
if (editMode.equals(EditMode.METADATA)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.metadataFailure"));
}
if (editMode.equals(EditMode.LICENSE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.termsFailure"));
}
if (editMode.equals(EditMode.FILE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.filesFailure"));
}
}
bulkFileDeleteInProgress = false;
}
private String returnToLatestVersion(){
dataset = datasetService.find(dataset.getId());
workingVersion = dataset.getLatestVersion();
if (workingVersion.isDeaccessioned() && dataset.getReleasedVersion() != null) {
workingVersion = dataset.getReleasedVersion();
}
setVersionTabList(resetVersionTabList());
setReleasedVersionTabList(resetReleasedVersionTabList());
newFiles.clear();
editMode = null;
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&version="+ workingVersion.getFriendlyVersionNumber() + "&faces-redirect=true";
}
private String returnToDatasetOnly(){
dataset = datasetService.find(dataset.getId());
editMode = null;
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&faces-redirect=true";
}
private String returnToDraftVersion(){
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&version=DRAFT" + "&faces-redirect=true";
}
public String cancel() {
return returnToLatestVersion();
}
private HttpClient getClient() {
// TODO:
// cache the http client? -- L.A. 4.0 alpha
return new HttpClient();
}
public void refreshLock() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking lock");
if (isStillLocked()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
public void refreshIngestLock() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking ingest lock");
if (isStillLockedForIngest()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
public void refreshAllLocks() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking all locks");
if (isStillLockedForAnyReason()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
/*
public boolean isLockedInProgress() {
if (dataset != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (dataset.isLocked()) {
return true;
}
}
return false;
}*/
public boolean isDatasetLockedInWorkflow() {
return (dataset != null)
? dataset.isLockedFor(DatasetLock.Reason.Workflow)
: false;
}
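    /**
     * The isStillLocked*() methods below re-check the lock state against the
     * database; they are polled from refreshLock(), refreshIngestLock() and
     * refreshAllLocks() above to detect when a lock has been released. Note
     * that in isStillLocked() a dataset whose only lock is InReview is not
     * treated as locked.
     */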
public boolean isStillLocked() {
if (dataset != null && dataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if(dataset.getLocks().size() == 1 && dataset.getLockFor(DatasetLock.Reason.InReview) != null){
return false;
}
if (datasetService.checkDatasetLock(dataset.getId())) {
return true;
}
}
return false;
}
public boolean isStillLockedForIngest() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (testDataset != null && testDataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) {
return true;
}
}
}
return false;
}
public boolean isStillLockedForAnyReason() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (testDataset != null && testDataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (testDataset.getLocks().size() > 0) {
return true;
}
}
}
return false;
}
public boolean isLocked() {
if (stateChanged) {
return false;
}
if (dataset != null) {
if (dataset.isLocked()) {
return true;
}
}
return false;
}
public boolean isLockedForIngest() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (stateChanged) {
return false;
}
if (testDataset != null) {
if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) {
return true;
}
}
}
return false;
}
public boolean isLockedForAnyReason() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (stateChanged) {
return false;
}
if (testDataset != null) {
if (testDataset.getLocks().size() > 0) {
return true;
}
}
}
return false;
}
private Boolean lockedFromEditsVar;
private Boolean lockedFromDownloadVar;
private boolean lockedDueToDcmUpload;
/**
* Authors are not allowed to edit but curators are allowed - when Dataset is inReview
* For all other locks edit should be locked for all editors.
*/
public boolean isLockedFromEdits() {
if(null == lockedFromEditsVar || stateChanged) {
try {
permissionService.checkEditDatasetLock(dataset, dvRequestService.getDataverseRequest(), new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromEditsVar = false;
} catch (IllegalCommandException ex) {
lockedFromEditsVar = true;
}
}
return lockedFromEditsVar;
}
// TODO: investigate why this method was needed in the first place?
// It appears that it was written under the assumption that downloads
// should not be allowed when a dataset is locked... (why?)
// There are calls to the method throughout the file-download-buttons fragment;
// except the way it's done there, it's actually disregarded (??) - so the
// download buttons ARE always enabled. The only place where this method is
// honored is on the batch (multiple file) download buttons in filesFragment.xhtml.
// As I'm working on #4000, I've been asked to re-enable the batch download
// buttons there as well, even when the dataset is locked. I'm doing that - but
// I feel we should probably figure out why we went to the trouble of creating
// this code in the first place... is there some reason we are forgetting now,
// why we do actually want to disable downloads on locked datasets???
// -- L.A. Aug. 2018
public boolean isLockedFromDownload(){
if(null == lockedFromDownloadVar || stateChanged) {
try {
permissionService.checkDownloadFileLock(dataset, dvRequestService.getDataverseRequest(), new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromDownloadVar = false;
} catch (IllegalCommandException ex) {
lockedFromDownloadVar = true;
return true;
}
}
return lockedFromDownloadVar;
}
public boolean isLockedDueToDcmUpload() {
return lockedDueToDcmUpload;
}
public void setLocked(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public void setLockedForIngest(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public void setLockedForAnyReason(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public boolean isStateChanged() {
return stateChanged;
}
public void setStateChanged(boolean stateChanged) {
// empty method, so that we can use DatasetPage.stateChanged in a hidden
// input on the page.
}
public DatasetVersionUI getDatasetVersionUI() {
return datasetVersionUI;
}
public List<DatasetVersion> getVersionTabList() {
return versionTabList;
}
public List<DatasetVersion> getVersionTabListForPostLoad(){
return this.versionTabListForPostLoad;
}
public void setVersionTabListForPostLoad(List<DatasetVersion> versionTabListForPostLoad) {
this.versionTabListForPostLoad = versionTabListForPostLoad;
}
public Integer getCompareVersionsCount() {
Integer retVal = 0;
for (DatasetVersion dvTest : dataset.getVersions()) {
if (!dvTest.isDeaccessioned()) {
retVal++;
}
}
return retVal;
}
/**
* To improve performance, Version Differences
* are retrieved/calculated after the page load
*
* See: dataset-versions.xhtml, remoteCommand 'postLoadVersionTablList'
*/
public void postLoadSetVersionTabList(){
if (this.getVersionTabList().isEmpty() && workingVersion.isDeaccessioned()){
setVersionTabList(resetVersionTabList());
}
this.setVersionTabListForPostLoad(this.getVersionTabList());
//this.versionTabList = this.resetVersionTabList();
}
/**
*
*
* @param versionTabList
*/
public void setVersionTabList(List<DatasetVersion> versionTabList) {
this.versionTabList = versionTabList;
}
private List<DatasetVersion> releasedVersionTabList = new ArrayList<>();
public List<DatasetVersion> getReleasedVersionTabList() {
return releasedVersionTabList;
}
public void setReleasedVersionTabList(List<DatasetVersion> releasedVersionTabList) {
this.releasedVersionTabList = releasedVersionTabList;
}
private List<DatasetVersion> selectedVersions;
public List<DatasetVersion> getSelectedVersions() {
return selectedVersions;
}
public void setSelectedVersions(List<DatasetVersion> selectedVersions) {
this.selectedVersions = selectedVersions;
}
private List<DatasetVersion> selectedDeaccessionVersions;
public List<DatasetVersion> getSelectedDeaccessionVersions() {
return selectedDeaccessionVersions;
}
public void setSelectedDeaccessionVersions(List<DatasetVersion> selectedDeaccessionVersions) {
this.selectedDeaccessionVersions = selectedDeaccessionVersions;
}
public DatasetVersionDifference getDatasetVersionDifference() {
return datasetVersionDifference;
}
public void setDatasetVersionDifference(DatasetVersionDifference datasetVersionDifference) {
this.datasetVersionDifference = datasetVersionDifference;
}
public void startMultipleFileDownload (){
boolean doNotSaveGuestbookResponse = workingVersion.isDraft();
// There's a chance that this is not really a batch download - i.e.,
// there may only be one file on the downloadable list. But the fileDownloadService
// method below will check for that, and will redirect to the single download, if
// that's the case. -- L.A.
fileDownloadService.writeGuestbookAndStartBatchDownload(guestbookResponse, doNotSaveGuestbookResponse);
}
private String downloadType = "";
public String getDownloadType() {
return downloadType;
}
public void setDownloadType(String downloadType) {
this.downloadType = downloadType;
}
public void openDownloadPopupForMultipleFileDownload() {
if (this.selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForDownload').show()");
return;
}
// There's a chance that this is not really a batch download - i.e.,
// there may only be one file on the downloadable list. But the fileDownloadService
// method below will check for that, and will redirect to the single download, if
// that's the case. -- L.A.
this.guestbookResponse.setDownloadtype("Download");
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadPopup').show();handleResizeDialog('downloadPopup');");
}
public void initGuestbookMultipleResponse(String selectedFileIds){
initGuestbookResponse(null, "download", selectedFileIds);
}
public void initGuestbookResponse(FileMetadata fileMetadata, String downloadFormat, String selectedFileIds) {
this.guestbookResponse = guestbookResponseService.initGuestbookResponse(fileMetadata, downloadFormat, selectedFileIds, session);
}
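    /**
     * Computes the difference between the two selected dataset versions,
     * always passing the newer version (higher id) first regardless of the
     * selection order; if exactly two versions are not selected, the
     * openCompareTwo() client-side handler is invoked instead.
     */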
public void compareVersionDifferences() {
RequestContext requestContext = RequestContext.getCurrentInstance();
if (this.selectedVersions.size() != 2) {
requestContext.execute("openCompareTwo();");
} else {
//order depends on order of selection - needs to be chronological order
if (this.selectedVersions.get(0).getId().intValue() > this.selectedVersions.get(1).getId().intValue()) {
updateVersionDifferences(this.selectedVersions.get(0), this.selectedVersions.get(1));
} else {
updateVersionDifferences(this.selectedVersions.get(1), this.selectedVersions.get(0));
}
}
}
public void updateVersionDifferences(DatasetVersion newVersion, DatasetVersion originalVersion) {
if (originalVersion == null) {
setDatasetVersionDifference(newVersion.getDefaultVersionDifference());
} else {
setDatasetVersionDifference(new DatasetVersionDifference(newVersion, originalVersion));
}
}
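    // Builds the list of versions shown on the Versions tab: users with the
    // ViewUnpublishedDataset permission see all versions; everyone else sees
    // only released or deaccessioned versions.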
private List<DatasetVersion> resetVersionTabList() {
//if (true)return null;
List<DatasetVersion> retList = new ArrayList<>();
if (permissionService.on(dataset).has(Permission.ViewUnpublishedDataset)) {
for (DatasetVersion version : dataset.getVersions()) {
version.setContributorNames(datasetVersionService.getContributorsNames(version));
retList.add(version);
}
} else {
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isDeaccessioned()) {
version.setContributorNames(datasetVersionService.getContributorsNames(version));
retList.add(version);
}
}
}
return retList;
}
private boolean existReleasedVersion;
public boolean isExistReleasedVersion() {
return existReleasedVersion;
}
public void setExistReleasedVersion(boolean existReleasedVersion) {
this.existReleasedVersion = existReleasedVersion;
}
private boolean resetExistRealeaseVersion(){
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isArchived()) {
return true;
}
}
return false;
}
private List<DatasetVersion> resetReleasedVersionTabList() {
List<DatasetVersion> retList = new ArrayList<>();
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isArchived()) {
retList.add(version);
}
}
return retList;
}
public String getDatasetPublishCustomText(){
String datasetPublishCustomText = settingsWrapper.getValueForKey(SettingsServiceBean.Key.DatasetPublishPopupCustomText);
if( datasetPublishCustomText!= null && !datasetPublishCustomText.isEmpty()){
return datasetPublishCustomText;
}
return "";
}
public Boolean isDatasetPublishPopupCustomTextOnAllVersions(){
return settingsWrapper.isTrueForKey(SettingsServiceBean.Key.DatasetPublishPopupCustomTextOnAllVersions, false);
}
public String getVariableMetadataURL(Long fileid) {
String myHostURL = getDataverseSiteUrl();
String metaURL = myHostURL + "/api/meta/datafile/" + fileid;
return metaURL;
}
public String getTabularDataFileURL(Long fileid) {
String myHostURL = getDataverseSiteUrl();
String dataURL = myHostURL + "/api/access/datafile/" + fileid;
return dataURL;
}
public List< String[]> getExporters(){
List<String[]> retList = new ArrayList<>();
String myHostURL = getDataverseSiteUrl();
for (String [] provider : ExportService.getInstance(settingsService).getExportersLabels() ){
String formatName = provider[1];
String formatDisplayName = provider[0];
Exporter exporter = null;
try {
exporter = ExportService.getInstance(settingsService).getExporter(formatName);
} catch (ExportException ex) {
exporter = null;
}
if (exporter != null && exporter.isAvailableToUsers()) {
// Not all metadata exports should be presented to the web users!
// Some are only for harvesting clients.
String[] temp = new String[2];
temp[0] = formatDisplayName;
temp[1] = myHostURL + "/api/datasets/export?exporter=" + formatName + "&persistentId=" + dataset.getGlobalIdString();
retList.add(temp);
}
}
return retList;
}
private FileMetadata fileMetadataSelected = null;
public void setFileMetadataSelected(FileMetadata fm){
setFileMetadataSelected(fm, null);
}
public void setFileMetadataSelected(FileMetadata fm, String guestbook) {
if (guestbook != null) {
if (guestbook.equals("create")) {
//
/*
FIX ME guestbook entry for subsetting
*/
// guestbookResponseService.createSilentGuestbookEntry(fm, "Subset");
} else {
initGuestbookResponse(fm, "Subset", null);
}
}
fileMetadataSelected = fm;
logger.fine("set the file for the advanced options popup (" + fileMetadataSelected.getLabel() + ")");
}
public FileMetadata getFileMetadataSelected() {
if (fileMetadataSelected != null) {
logger.fine("returning file metadata for the advanced options popup (" + fileMetadataSelected.getLabel() + ")");
} else {
logger.fine("file metadata for the advanced options popup is null.");
}
return fileMetadataSelected;
}
public void clearFileMetadataSelected() {
fileMetadataSelected = null;
}
public boolean isDesignatedDatasetThumbnail (FileMetadata fileMetadata) {
if (fileMetadata != null) {
if (fileMetadata.getDataFile() != null) {
if (fileMetadata.getDataFile().getId() != null) {
if (fileMetadata.getDataFile().getOwner() != null) {
if (fileMetadata.getDataFile().equals(fileMetadata.getDataFile().getOwner().getThumbnailFile())) {
return true;
}
}
}
}
}
return false;
}
/*
* Items for the "Designated this image as the Dataset thumbnail:
*/
private FileMetadata fileMetadataSelectedForThumbnailPopup = null;
public void setFileMetadataSelectedForThumbnailPopup(FileMetadata fm){
fileMetadataSelectedForThumbnailPopup = fm;
alreadyDesignatedAsDatasetThumbnail = getUseAsDatasetThumbnail();
}
public FileMetadata getFileMetadataSelectedForThumbnailPopup() {
return fileMetadataSelectedForThumbnailPopup;
}
public void clearFileMetadataSelectedForThumbnailPopup() {
fileMetadataSelectedForThumbnailPopup = null;
}
private boolean alreadyDesignatedAsDatasetThumbnail = false;
public boolean getUseAsDatasetThumbnail() {
if (fileMetadataSelectedForThumbnailPopup != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getId() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().equals(fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().getThumbnailFile())) {
return true;
}
}
}
}
}
return false;
}
public void setUseAsDatasetThumbnail(boolean useAsThumbnail) {
if (fileMetadataSelectedForThumbnailPopup != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getId() != null) { // ?
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner() != null) {
if (useAsThumbnail) {
fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().setThumbnailFile(fileMetadataSelectedForThumbnailPopup.getDataFile());
} else if (getUseAsDatasetThumbnail()) {
fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().setThumbnailFile(null);
}
}
}
}
}
}
public void saveAsDesignatedThumbnail() {
// We don't need to do anything specific to save this setting, because
// the setUseAsDatasetThumbnail() method, above, has already updated the
// file object appropriately.
// However, once the "save" button is pressed, we want to show a success
// message if a new image has been designated as such:
if (getUseAsDatasetThumbnail() && !alreadyDesignatedAsDatasetThumbnail) {
String successMessage = JH.localize("file.assignedDataverseImage.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", fileMetadataSelectedForThumbnailPopup.getLabel());
JsfHelper.addFlashMessage(successMessage);
}
// And reset the selected fileMetadata:
fileMetadataSelectedForThumbnailPopup = null;
}
/*
* Items for the "Tags (Categories)" popup.
*
*/
private FileMetadata fileMetadataSelectedForTagsPopup = null;
public void setFileMetadataSelectedForTagsPopup(){
}
public void setFileMetadataSelectedForTagsPopup(FileMetadata fm){
fileMetadataSelectedForTagsPopup = fm;
}
public FileMetadata getFileMetadataSelectedForTagsPopup() {
return fileMetadataSelectedForTagsPopup;
}
public void clearFileMetadataSelectedForTagsPopup() {
fileMetadataSelectedForTagsPopup = null;
}
public List <FileMetadata> getListFileMetadataSelectedForTagsPopup(){
List<FileMetadata> retList = new ArrayList<>();
for (FileMetadata fm : selectedFiles){
retList.add(fm);
}
return retList;
}
private List<String> categoriesByName;
public void setCategoriesByName(List<String> dummy){
categoriesByName = dummy;
}
public void refreshTagsPopUp(){
if (bulkUpdateCheckVersion()){
refreshSelectedFiles();
}
updateFileCounts();
refreshCategoriesByName();
refreshTabFileTagsByName();
}
private List<String> tabFileTagsByName;
public List<String> getTabFileTagsByName() {
return tabFileTagsByName;
}
public void setTabFileTagsByName(List<String> tabFileTagsByName) {
this.tabFileTagsByName = tabFileTagsByName;
}
private void refreshCategoriesByName(){
categoriesByName= new ArrayList<>();
for (String category: dataset.getCategoriesByName() ){
categoriesByName.add(category);
}
refreshSelectedTags();
}
public List<String> getCategoriesByName() {
return categoriesByName;
}
/*
* 1. Tabular File Tags:
*/
private List<String> tabFileTags = null;
public List<String> getTabFileTags() {
if (tabFileTags == null) {
tabFileTags = DataFileTag.listTags();
}
return tabFileTags;
}
public void setTabFileTags(List<String> tabFileTags) {
this.tabFileTags = tabFileTags;
}
private String[] selectedTabFileTags = {};
public String[] getSelectedTabFileTags() {
return selectedTabFileTags;
}
public void setSelectedTabFileTags(String[] selectedTabFileTags) {
this.selectedTabFileTags = selectedTabFileTags;
}
private String[] selectedTags = {};
public void handleSelection(final AjaxBehaviorEvent event) {
if (selectedTags != null) {
selectedTags = selectedTags.clone();
}
}
private void refreshTabFileTagsByName(){
tabFileTagsByName= new ArrayList<>();
for (FileMetadata fm : selectedFiles) {
if (fm.getDataFile().getTags() != null) {
for (int i = 0; i < fm.getDataFile().getTags().size(); i++) {
if (!tabFileTagsByName.contains(fm.getDataFile().getTags().get(i).getTypeLabel())) {
tabFileTagsByName.add(fm.getDataFile().getTags().get(i).getTypeLabel());
}
}
}
}
refreshSelectedTabFileTags();
}
private void refreshSelectedTabFileTags() {
selectedTabFileTags = new String[0];
if (tabFileTagsByName.size() > 0) {
selectedTabFileTags = new String[tabFileTagsByName.size()];
for (int i = 0; i < tabFileTagsByName.size(); i++) {
selectedTabFileTags[i] = tabFileTagsByName.get(i);
}
}
Arrays.sort(selectedTabFileTags);
}
private boolean tabularDataSelected = false;
public boolean isTabularDataSelected() {
return tabularDataSelected;
}
public void setTabularDataSelected(boolean tabularDataSelected) {
this.tabularDataSelected = tabularDataSelected;
}
public String[] getSelectedTags() {
return selectedTags;
}
public void setSelectedTags(String[] selectedTags) {
this.selectedTags = selectedTags;
}
/*
* "File Tags" (aka "File Categories"):
*/
private String newCategoryName = null;
public String getNewCategoryName() {
return newCategoryName;
}
public void setNewCategoryName(String newCategoryName) {
this.newCategoryName = newCategoryName;
}
    public String saveNewCategory() {
        if (newCategoryName != null && !newCategoryName.isEmpty()) {
            categoriesByName.add(newCategoryName);
            // Now increase the size of selectedTags and add the new category
            // (only when a non-empty name was actually entered):
            String[] temp = new String[selectedTags.length + 1];
            System.arraycopy(selectedTags, 0, temp, 0, selectedTags.length);
            selectedTags = temp;
            selectedTags[selectedTags.length - 1] = newCategoryName;
        }
        // Blank out the added category:
        newCategoryName = "";
        return "";
    }
private void refreshSelectedTags() {
selectedTags = new String[0];
List<String> selectedCategoriesByName= new ArrayList<>();
for (FileMetadata fm : selectedFiles) {
if (fm.getCategories() != null) {
for (int i = 0; i < fm.getCategories().size(); i++) {
if (!selectedCategoriesByName.contains(fm.getCategories().get(i).getName())) {
selectedCategoriesByName.add(fm.getCategories().get(i).getName());
}
}
}
}
if (selectedCategoriesByName.size() > 0) {
selectedTags = new String[selectedCategoriesByName.size()];
for (int i = 0; i < selectedCategoriesByName.size(); i++) {
selectedTags[i] = selectedCategoriesByName.get(i);
}
}
Arrays.sort(selectedTags);
}
/* This method handles saving both "tabular file tags" and
* "file categories" (which are also considered "tags" in 4.0)
*/
public String saveFileTagsAndCategories() {
// 1. New Category name:
// Here we don't need to do anything for the file categories that the user
// selected from the pull down list; that was done directly from the
// page with the FileMetadata.setCategoriesByName() method.
// So here we only need to take care of the new, custom category
// name, if entered:
if (bulkUpdateCheckVersion()) {
refreshSelectedFiles();
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (selectedFiles != null && selectedFiles.size() > 0) {
for (FileMetadata fm : selectedFiles) {
if (fm.getDataFile().equals(fmd.getDataFile())) {
fmd.setCategories(new ArrayList<>());
if (newCategoryName != null) {
fmd.addCategoryByName(newCategoryName);
}
// 2. Tabular DataFile Tags:
if (selectedTags != null) {
for (String selectedTag : selectedTags) {
fmd.addCategoryByName(selectedTag);
}
}
if (fmd.getDataFile().isTabularData()) {
fmd.getDataFile().setTags(null);
for (String selectedTabFileTag : selectedTabFileTags) {
DataFileTag tag = new DataFileTag();
try {
tag.setTypeByLabel(selectedTabFileTag);
tag.setDataFile(fmd.getDataFile());
fmd.getDataFile().addTag(tag);
}catch (IllegalArgumentException iax) {
// ignore
}
}
}
}
}
}
}
// success message:
String successMessage = JH.localize("file.assignedTabFileTags.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", "Selected Files");
JsfHelper.addFlashMessage(successMessage);
selectedTags = null;
logger.fine("New category name: " + newCategoryName);
newCategoryName = null;
if (removeUnusedTags){
removeUnusedFileTagsFromDataset();
}
save();
return returnToDraftVersion();
}
/*
Remove unused file tags
When updating datafile tags see if any custom tags are not in use.
Remove them
*/
private void removeUnusedFileTagsFromDataset() {
categoriesByName = new ArrayList<>();
for (FileMetadata fm : workingVersion.getFileMetadatas()) {
if (fm.getCategories() != null) {
for (int i = 0; i < fm.getCategories().size(); i++) {
if (!categoriesByName.contains(fm.getCategories().get(i).getName())) {
categoriesByName.add(fm.getCategories().get(i).getName());
}
}
}
}
List<DataFileCategory> datasetFileCategoriesToRemove = new ArrayList<>();
for (DataFileCategory test : dataset.getCategories()) {
boolean remove = true;
for (String catByName : categoriesByName) {
if (catByName.equals(test.getName())) {
remove = false;
break;
}
}
if (remove) {
datasetFileCategoriesToRemove.add(test);
}
}
if (!datasetFileCategoriesToRemove.isEmpty()) {
for (DataFileCategory remove : datasetFileCategoriesToRemove) {
dataset.getCategories().remove(remove);
}
}
}
/*
* Items for the "Advanced (Ingest) Options" popup.
*
*/
private FileMetadata fileMetadataSelectedForIngestOptionsPopup = null;
public void setFileMetadataSelectedForIngestOptionsPopup(FileMetadata fm){
fileMetadataSelectedForIngestOptionsPopup = fm;
}
public FileMetadata getFileMetadataSelectedForIngestOptionsPopup() {
return fileMetadataSelectedForIngestOptionsPopup;
}
public void clearFileMetadataSelectedForIngestOptionsPopup() {
fileMetadataSelectedForIngestOptionsPopup = null;
}
private String ingestLanguageEncoding = null;
public String getIngestLanguageEncoding() {
if (ingestLanguageEncoding == null) {
return "UTF8 (default)";
}
return ingestLanguageEncoding;
}
public void setIngestLanguageEncoding(String ingestLanguageEncoding) {
this.ingestLanguageEncoding = ingestLanguageEncoding;
}
public void setIngestEncoding(String ingestEncoding) {
ingestLanguageEncoding = ingestEncoding;
}
private String savedLabelsTempFile = null;
public void handleLabelsFileUpload(FileUploadEvent event) {
logger.fine("entering handleUpload method.");
UploadedFile file = event.getFile();
if (file != null) {
InputStream uploadStream = null;
try {
uploadStream = file.getInputstream();
} catch (IOException ioex) {
logger.log(Level.WARNING, ioex, ()->"the file "+file.getFileName()+" failed to upload!");
List<String> args = Arrays.asList(file.getFileName());
String msg = BundleUtil.getStringFromBundle("dataset.file.uploadFailure.detailmsg", args);
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.file.uploadFailure"), msg);
FacesContext.getCurrentInstance().addMessage(null, message);
return;
}
savedLabelsTempFile = saveTempFile(uploadStream);
logger.fine(()->file.getFileName() + " is successfully uploaded.");
List<String> args = Arrays.asList(file.getFileName());
FacesMessage message = new FacesMessage(BundleUtil.getStringFromBundle("dataset.file.upload", args));
FacesContext.getCurrentInstance().addMessage(null, message);
}
// process file (i.e., just save it in a temp location; for now):
}
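    /**
     * Copies the uploaded stream into a temporary file and returns its
     * absolute path (or null if the stream could not be read); used to stage
     * the "extra labels" file from handleLabelsFileUpload() until the user
     * saves the advanced ingest options.
     */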
private String saveTempFile(InputStream input) {
if (input == null) {
return null;
}
byte[] buffer = new byte[8192];
int bytesRead = 0;
File labelsFile = null;
FileOutputStream output = null;
try {
labelsFile = File.createTempFile("tempIngestLabels.", ".txt");
output = new FileOutputStream(labelsFile);
while ((bytesRead = input.read(buffer)) > -1) {
output.write(buffer, 0, bytesRead);
}
} catch (IOException ioex) {
return null;//leaving this purely in the spirit of minimizing changes.
} finally {
IOUtils.closeQuietly(input);
IOUtils.closeQuietly(output);
}
if (labelsFile != null) {
return labelsFile.getAbsolutePath();
}
return null;
}
public void saveAdvancedOptions() {
// Language encoding for SPSS SAV (and, possibly, other tabular ingests):
if (ingestLanguageEncoding != null) {
if (fileMetadataSelectedForIngestOptionsPopup != null && fileMetadataSelectedForIngestOptionsPopup.getDataFile() != null) {
if (fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest() == null) {
IngestRequest ingestRequest = new IngestRequest();
ingestRequest.setDataFile(fileMetadataSelectedForIngestOptionsPopup.getDataFile());
fileMetadataSelectedForIngestOptionsPopup.getDataFile().setIngestRequest(ingestRequest);
}
fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest().setTextEncoding(ingestLanguageEncoding);
}
}
ingestLanguageEncoding = null;
// Extra labels for SPSS POR (and, possibly, other tabular ingests):
// (we are adding this parameter to the IngestRequest now, instead of back
// when it was uploaded. This is because we want the user to be able to
// hit cancel and bail out, until they actually click 'save' in the
// "advanced options" popup) -- L.A. 4.0 beta 11
if (savedLabelsTempFile != null) {
if (fileMetadataSelectedForIngestOptionsPopup != null && fileMetadataSelectedForIngestOptionsPopup.getDataFile() != null) {
if (fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest() == null) {
IngestRequest ingestRequest = new IngestRequest();
ingestRequest.setDataFile(fileMetadataSelectedForIngestOptionsPopup.getDataFile());
fileMetadataSelectedForIngestOptionsPopup.getDataFile().setIngestRequest(ingestRequest);
}
fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest().setLabelsFile(savedLabelsTempFile);
}
}
savedLabelsTempFile = null;
fileMetadataSelectedForIngestOptionsPopup = null;
}
private Boolean downloadButtonAvailable = null;
public boolean isDownloadButtonAvailable(){
if (downloadButtonAvailable != null) {
return downloadButtonAvailable;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (this.fileDownloadHelper.canDownloadFile(fmd)) {
downloadButtonAvailable = true;
return true;
}
}
downloadButtonAvailable = false;
return false;
}
public boolean isFileAccessRequestMultiButtonRequired(){
if (!isSessionUserAuthenticated() || !dataset.isFileAccessRequest()){
return false;
}
if (workingVersion == null) {
return false;
}
if (!workingVersion.getTermsOfUseAndAccess().isFileAccessRequest()){
// return false;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiButtonEnabled(){
if (!isSessionUserAuthenticated() || !dataset.isFileAccessRequest()){
return false;
}
if( this.selectedRestrictedFiles == null || this.selectedRestrictedFiles.isEmpty() ){
return false;
}
for (FileMetadata fmd : this.selectedRestrictedFiles){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
private Boolean downloadButtonAllEnabled = null;
    public boolean isDownloadAllButtonEnabled() {
        if (downloadButtonAllEnabled == null) {
            // Enabled only if the user can download every file in the version;
            // cache the result so we don't re-check on every render:
            downloadButtonAllEnabled = true;
            for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
                if (!this.fileDownloadHelper.canDownloadFile(fmd)) {
                    downloadButtonAllEnabled = false;
                    break;
                }
            }
        }
        return downloadButtonAllEnabled;
    }
public boolean isDownloadSelectedButtonEnabled(){
if( this.selectedFiles == null || this.selectedFiles.isEmpty() ){
return false;
}
for (FileMetadata fmd : this.selectedFiles){
if (this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiSignUpButtonRequired(){
if (isSessionUserAuthenticated()){
return false;
}
// only show button if dataset allows an access request
if (!dataset.isFileAccessRequest()){
return false;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiSignUpButtonEnabled(){
if (isSessionUserAuthenticated()){
return false;
}
if( this.selectedRestrictedFiles == null || this.selectedRestrictedFiles.isEmpty() ){
return false;
}
// only show button if dataset allows an access request
if (!dataset.isFileAccessRequest()){
return false;
}
for (FileMetadata fmd : this.selectedRestrictedFiles){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isDownloadPopupRequired() {
return FileUtil.isDownloadPopupRequired(workingVersion);
}
public boolean isRequestAccessPopupRequired() {
return FileUtil.isRequestAccessPopupRequired(workingVersion);
}
public String requestAccessMultipleFiles() {
if (selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForRequestAccess').show()");
return "";
} else {
fileDownloadHelper.clearRequestAccessFiles();
for (FileMetadata fmd : selectedFiles){
fileDownloadHelper.addMultipleFilesForRequestAccess(fmd.getDataFile());
}
if (isRequestAccessPopupRequired()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('requestAccessPopup').show()");
return "";
} else {
//No popup required
fileDownloadHelper.requestAccessIndirect();
return "";
}
}
}
public boolean isSortButtonEnabled() {
/**
* @todo The "Sort" Button seems to stop responding to mouse clicks
* after a while so it can't be shipped in 4.2 and will be deferred, to
* be picked up in https://github.com/IQSS/dataverse/issues/2506
*/
return false;
}
public void updateFileListing(String fileSortField, String fileSortOrder) {
this.fileSortField = fileSortField;
this.fileSortOrder = fileSortOrder;
fileMetadatas = populateFileMetadatas();
}
private List<FileMetadata> populateFileMetadatas() {
if (isSortButtonEnabled()) {
List<FileMetadata> fileMetadatasToSet = new ArrayList<>();
Long datasetVersion = workingVersion.getId();
if (datasetVersion != null) {
int unlimited = 0;
int maxResults = unlimited;
List<FileMetadata> dataFilesNew = datafileService.findFileMetadataByDatasetVersionId(datasetVersion, maxResults, fileSortField, fileSortOrder);
fileMetadatasToSet.addAll(dataFilesNew);
}
return fileMetadatasToSet;
} else {
return new ArrayList<>();
}
}
public String getFileSortField() {
return fileSortField;
}
public void setFileSortField(String fileSortField) {
this.fileSortField = fileSortField;
}
public String getFileSortOrder() {
return fileSortOrder;
}
public void setFileSortOrder(String fileSortOrder) {
this.fileSortOrder = fileSortOrder;
}
public List<FileMetadata> getFileMetadatas() {
if (isSortButtonEnabled()) {
return fileMetadatas;
} else {
return new ArrayList<>();
}
}
public String getFileSortFieldName() {
return FileSortFieldAndOrder.label;
}
public String getFileSortFieldDate() {
return FileSortFieldAndOrder.createDate;
}
public String getFileSortFieldSize() {
return FileSortFieldAndOrder.size;
}
public String getFileSortFieldType() {
return FileSortFieldAndOrder.type;
}
public String getSortByAscending() {
return SortBy.ASCENDING;
}
public String getSortByDescending() {
return SortBy.DESCENDING;
}
PrivateUrl privateUrl;
public PrivateUrl getPrivateUrl() {
return privateUrl;
}
public void setPrivateUrl(PrivateUrl privateUrl) {
this.privateUrl = privateUrl;
}
public void initPrivateUrlPopUp() {
if (privateUrl != null) {
setPrivateUrlJustCreatedToFalse();
}
}
boolean privateUrlWasJustCreated;
public boolean isPrivateUrlWasJustCreated() {
return privateUrlWasJustCreated;
}
public void setPrivateUrlJustCreatedToFalse() {
privateUrlWasJustCreated = false;
}
public void createPrivateUrl() {
try {
PrivateUrl createdPrivateUrl = commandEngine.submit(new CreatePrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
privateUrl = createdPrivateUrl;
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageAuthor", Arrays.asList(getPrivateUrlLink(privateUrl))));
privateUrlWasJustCreated = true;
} catch (CommandException ex) {
String msg = BundleUtil.getStringFromBundle("dataset.privateurl.noPermToCreate", PrivateUrlUtil.getRequiredPermissions(ex));
logger.info("Unable to create a Private URL for dataset id " + dataset.getId() + ". Message to user: " + msg + " Exception: " + ex);
JH.addErrorMessage(msg);
}
}
public void disablePrivateUrl() {
try {
commandEngine.submit(new DeletePrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
privateUrl = null;
JH.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.privateurl.disabledSuccess"));
} catch (CommandException ex) {
logger.info("CommandException caught calling DeletePrivateUrlCommand: " + ex);
}
}
public boolean isUserCanCreatePrivateURL() {
return dataset.getLatestVersion().isDraft();
}
public String getPrivateUrlLink(PrivateUrl privateUrl) {
return privateUrl.getLink();
}
public FileDownloadHelper getFileDownloadHelper() {
return fileDownloadHelper;
}
public void setFileDownloadHelper(FileDownloadHelper fileDownloadHelper) {
this.fileDownloadHelper = fileDownloadHelper;
}
public FileDownloadServiceBean getFileDownloadService() {
return fileDownloadService;
}
public void setFileDownloadService(FileDownloadServiceBean fileDownloadService) {
this.fileDownloadService = fileDownloadService;
}
public GuestbookResponseServiceBean getGuestbookResponseService() {
return guestbookResponseService;
}
public void setGuestbookResponseService(GuestbookResponseServiceBean guestbookResponseService) {
this.guestbookResponseService = guestbookResponseService;
}
public WorldMapPermissionHelper getWorldMapPermissionHelper() {
return worldMapPermissionHelper;
}
public void setWorldMapPermissionHelper(WorldMapPermissionHelper worldMapPermissionHelper) {
this.worldMapPermissionHelper = worldMapPermissionHelper;
}
/**
* dataset title
* @return title of workingVersion
*/
public String getTitle() {
assert (null != workingVersion);
return workingVersion.getTitle();
}
/**
* dataset description
*
* @return description of workingVersion
*/
public String getDescription() {
return workingVersion.getDescriptionPlainText();
}
/**
* dataset authors
*
* @return list of author names
*/
public List<String> getDatasetAuthors() {
assert (workingVersion != null);
return workingVersion.getDatasetAuthorNames();
}
/**
* publisher (aka - name of root dataverse)
*
* @return the publisher of the version
*/
public String getPublisher() {
assert (null != workingVersion);
return workingVersion.getRootDataverseNameforCitation();
}
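    /**
     * Streams the rsync upload script to the browser as a file download and, on
     * success, places a DcmUpload lock on the dataset.
     */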
public void downloadRsyncScript() {
FacesContext ctx = FacesContext.getCurrentInstance();
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
response.setContentType("application/download");
String contentDispositionString;
contentDispositionString = "attachment;filename=" + rsyncScriptFilename;
response.setHeader("Content-Disposition", contentDispositionString);
try {
ServletOutputStream out = response.getOutputStream();
out.write(getRsyncScript().getBytes());
out.flush();
ctx.responseComplete();
} catch (IOException e) {
String error = "Problem getting bytes from rsync script: " + e;
logger.warning(error);
return;
}
// If the script has been successfully downloaded, lock the dataset:
String lockInfoMessage = "script downloaded";
DatasetLock lock = datasetService.addDatasetLock(dataset.getId(), DatasetLock.Reason.DcmUpload, session.getUser() != null ? ((AuthenticatedUser)session.getUser()).getId() : null, lockInfoMessage);
if (lock != null) {
dataset.addLock(lock);
} else {
logger.log(Level.WARNING, "Failed to lock the dataset (dataset id={0})", dataset.getId());
}
}
public void closeRsyncScriptPopup(CloseEvent event) {
finishRsyncScriptAction();
}
public String finishRsyncScriptAction() {
// This method is called when the user clicks on "Close" in the "Rsync Upload"
// popup. If they have successfully downloaded the rsync script, the
// dataset should now be locked; which means we should put up the
// "dcm upload in progress" message - that will be shown on the page
// until the rsync upload is completed and the dataset is unlocked.
if (isLocked()) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"), BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details"));
}
return "";
}
/**
* this method returns the dataset fields to be shown in the dataset summary
* on the dataset page.
 * It returns the default summary fields (subject, description, keywords, related publications and notes)
 * if the custom summary fields have not been set; otherwise it returns the custom fields set by the sysadmins.
*
* @return the dataset fields to be shown in the dataset summary
*/
public List<DatasetField> getDatasetSummaryFields() {
customFields = settingsWrapper.getValueForKey(SettingsServiceBean.Key.CustomDatasetSummaryFields);
return DatasetUtil.getDatasetSummaryFields(workingVersion, customFields);
}
public List<ExternalTool> getConfigureToolsForDataFile(Long fileId) {
return getCachedToolsForDataFile(fileId, ExternalTool.Type.CONFIGURE);
}
public List<ExternalTool> getExploreToolsForDataFile(Long fileId) {
return getCachedToolsForDataFile(fileId, ExternalTool.Type.EXPLORE);
}
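    /**
     * Returns the external tools of the given type that apply to the given file,
     * caching the lookup per file id so repeated calls do not re-query the service.
     */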
public List<ExternalTool> getCachedToolsForDataFile(Long fileId, ExternalTool.Type type) {
Map<Long, List<ExternalTool>> cachedToolsByFileId = new HashMap<>();
List<ExternalTool> externalTools = new ArrayList<>();
switch (type) {
case EXPLORE:
cachedToolsByFileId = exploreToolsByFileId;
externalTools = exploreTools;
break;
case CONFIGURE:
cachedToolsByFileId = configureToolsByFileId;
externalTools = configureTools;
break;
default:
break;
}
List<ExternalTool> cachedTools = cachedToolsByFileId.get(fileId);
if (cachedTools != null) { //if already queried before and added to list
return cachedTools;
}
DataFile dataFile = datafileService.find(fileId);
cachedTools = ExternalToolServiceBean.findExternalToolsByFile(externalTools, dataFile);
cachedToolsByFileId.put(fileId, cachedTools); //add to map so we don't have to do the lifting again
return cachedTools;
}
Boolean thisLatestReleasedVersion = null;
public boolean isThisLatestReleasedVersion() {
if (thisLatestReleasedVersion != null) {
return thisLatestReleasedVersion;
}
if (!workingVersion.isPublished()) {
thisLatestReleasedVersion = false;
return false;
}
DatasetVersion latestPublishedVersion = null;
Command<DatasetVersion> cmd = new GetLatestPublishedDatasetVersionCommand(dvRequestService.getDataverseRequest(), dataset);
try {
latestPublishedVersion = commandEngine.submit(cmd);
} catch (Exception ex) {
// whatever...
}
thisLatestReleasedVersion = workingVersion.equals(latestPublishedVersion);
return thisLatestReleasedVersion;
}
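    /**
     * Returns the schema.org JSON-LD for this dataset: the cached export when
     * available, otherwise the representation generated from the working version.
     * An empty string is returned unless this is the latest released version.
     */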
public String getJsonLd() {
if (isThisLatestReleasedVersion()) {
ExportService instance = ExportService.getInstance(settingsService);
String jsonLd = instance.getExportAsString(dataset, SchemaDotOrgExporter.NAME);
if (jsonLd != null) {
logger.fine("Returning cached schema.org JSON-LD.");
return jsonLd;
} else {
logger.fine("No cached schema.org JSON-LD available. Going to the database.");
return workingVersion.getJsonLd();
}
}
return "";
}
public void selectAllFiles() {
logger.fine("selectAllFiles called");
selectedFiles = workingVersion.getFileMetadatas();
}
public void clearSelection() {
logger.info("clearSelection called");
selectedFiles = Collections.EMPTY_LIST;
}
public void fileListingPaginatorListener(PageEvent event) {
setFilePaginatorPage(event.getPage());
}
public void refreshPaginator() {
FacesContext facesContext = FacesContext.getCurrentInstance();
org.primefaces.component.datatable.DataTable dt = (org.primefaces.component.datatable.DataTable) facesContext.getViewRoot().findComponent("datasetForm:tabView:filesTable");
setFilePaginatorPage(dt.getPage());
setRowsPerPage(dt.getRowsToRender());
}
}
| 1 | 38,849 | These changes just match the logic update in the next method that was done previously - to assure the whole set of files is valid rather than that there is at least one that would be changed. | IQSS-dataverse | java |
@@ -30,6 +30,11 @@ func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
jsonhttp.InternalServerError(w, "cannot get or create tag")
return
}
+ w.Header().Set(SwarmTagUidHeader, fmt.Sprint(tag.Uid))
+ w.WriteHeader(http.StatusContinue)
+ if f, ok := w.(http.Flusher); ok {
+ f.Flush()
+ }
// Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag) | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"fmt"
"net/http"
"strings"
"github.com/ethersphere/bee/pkg/file"
"github.com/ethersphere/bee/pkg/file/splitter"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/gorilla/mux"
)
type bytesPostResponse struct {
Reference swarm.Address `json:"reference"`
}
// bytesUploadHandler handles upload of raw binary data of arbitrary length.
func (s *server) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
tag, created, err := s.getOrCreateTag(r.Header.Get(SwarmTagUidHeader))
if err != nil {
s.Logger.Debugf("bytes upload: get or create tag: %v", err)
s.Logger.Error("bytes upload: get or create tag")
jsonhttp.InternalServerError(w, "cannot get or create tag")
return
}
// Add the tag to the context
ctx := sctx.SetTag(r.Context(), tag)
toEncrypt := strings.ToLower(r.Header.Get(EncryptHeader)) == "true"
sp := splitter.NewSimpleSplitter(s.Storer, requestModePut(r))
address, err := file.SplitWriteAll(ctx, sp, r.Body, r.ContentLength, toEncrypt)
if err != nil {
s.Logger.Debugf("bytes upload: split write all: %v", err)
s.Logger.Error("bytes upload: split write all")
jsonhttp.InternalServerError(w, nil)
return
}
if created {
tag.DoneSplit(address)
}
w.Header().Set(SwarmTagUidHeader, fmt.Sprint(tag.Uid))
w.Header().Set("Access-Control-Expose-Headers", SwarmTagUidHeader)
jsonhttp.OK(w, bytesPostResponse{
Reference: address,
})
}
// bytesGetHandler handles retrieval of raw binary data of arbitrary length.
func (s *server) bytesGetHandler(w http.ResponseWriter, r *http.Request) {
addressHex := mux.Vars(r)["address"]
address, err := swarm.ParseHexAddress(addressHex)
if err != nil {
s.Logger.Debugf("bytes: parse address %s: %v", addressHex, err)
s.Logger.Error("bytes: parse address error")
jsonhttp.BadRequest(w, "invalid address")
return
}
additionalHeaders := http.Header{
"Content-Type": {"application/octet-stream"},
}
s.downloadHandler(w, r, address, additionalHeaders)
}
| 1 | 11,928 | Why setting the status to Continue as at the end it is written OK? Status should be written only once. | ethersphere-bee | go |
@@ -140,10 +140,10 @@ func deps() {
cmd("go get -u github.com/pmezard/go-difflib/difflib"),
cmd("./scripts/install-rust-proofs.sh"),
cmd("./scripts/install-bls-signatures.sh"),
+ cmd("./proofs/bin/paramcache"),
+ cmd("./scripts/copy-groth-params.sh"),
}
- cmds = append(cmds, hydrateParamCache()...)
-
for _, c := range cmds {
runCmd(c)
} | 1 | package main
import (
"fmt"
gobuild "go/build"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/filecoin-project/go-filecoin/util/version"
)
var lineBreak = "\n"
func init() {
log.SetFlags(0)
if runtime.GOOS == "windows" {
lineBreak = "\r\n"
}
}
// command is a structure representing a shell command to be run in the
// specified directory
type command struct {
dir string
parts []string
}
// cmd creates a new command using the pwd and its cwd
func cmd(parts ...string) command {
return cmdWithDir("./", parts...)
}
// cmdWithDir creates a new command using the specified directory as its cwd
func cmdWithDir(dir string, parts ...string) command {
return command{
dir: dir,
parts: parts,
}
}
func runCmd(c command) {
parts := c.parts
if len(parts) == 1 {
parts = strings.Split(parts[0], " ")
}
name := strings.Join(parts, " ")
cmd := exec.Command(parts[0], parts[1:]...) // #nosec
cmd.Dir = c.dir
log.Println(name)
stderr, err := cmd.StderrPipe()
if err != nil {
panic(err)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
panic(err)
}
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
if _, err = io.Copy(os.Stderr, stderr); err != nil {
panic(err)
}
}()
go func() {
defer wg.Done()
if _, err = io.Copy(os.Stdout, stdout); err != nil {
panic(err)
}
}()
if err := cmd.Start(); err != nil {
panic(err)
}
wg.Wait()
if err := cmd.Wait(); err != nil {
log.Fatalf("Command '%s' failed: %s\n", name, err)
}
}
func runCapture(name string) string {
args := strings.Split(name, " ")
cmd := exec.Command(args[0], args[1:]...) // #nosec
log.Println(name)
output, err := cmd.CombinedOutput()
if err != nil {
log.Fatalf("Command '%s' failed: %s\n", name, err)
}
return strings.Trim(string(output), lineBreak)
}
// hydrateParamCache hydrates the groth parameter cache used when sealing a
// sector to ensure consistent test runs. If the cache is hydrated lazily (the
// first time that seal runs), a test could take longer than expected and time
// out.
func hydrateParamCache() []command {
return []command{
cmd("./proofs/bin/paramcache"),
}
}
// deps installs all dependencies
func deps() {
runCmd(cmd("pkg-config --version"))
log.Println("Installing dependencies...")
cmds := []command{
cmd("go get -u github.com/whyrusleeping/gx"),
cmd("go get -u github.com/whyrusleeping/gx-go"),
cmd("gx install"),
cmd("gx-go rewrite"),
cmd("go get -u github.com/alecthomas/gometalinter"),
cmd("gometalinter --install"),
cmd("go get -u github.com/stretchr/testify"),
cmd("go get -u github.com/xeipuuv/gojsonschema"),
cmd("go get -u github.com/ipfs/iptb"),
cmd("go get -u github.com/docker/docker/api/types"),
cmd("go get -u github.com/docker/docker/api/types/container"),
cmd("go get -u github.com/docker/docker/client"),
cmd("go get -u github.com/docker/docker/pkg/stdcopy"),
cmd("go get -u github.com/ipsn/go-secp256k1"),
cmd("go get -u github.com/json-iterator/go"),
cmd("go get -u github.com/prometheus/client_golang/prometheus"),
cmd("go get -u github.com/prometheus/client_golang/prometheus/promhttp"),
cmd("go get -u github.com/jstemmer/go-junit-report"),
cmd("go get -u github.com/pmezard/go-difflib/difflib"),
cmd("./scripts/install-rust-proofs.sh"),
cmd("./scripts/install-bls-signatures.sh"),
}
cmds = append(cmds, hydrateParamCache()...)
for _, c := range cmds {
runCmd(c)
}
}
// smartdeps avoids fetching from the network
func smartdeps() {
runCmd(cmd("pkg-config --version"))
log.Println("Installing dependencies...")
// commands we need to run
cmds := []command{
cmd("gx install"),
cmd("gx-go rewrite"),
cmd("gometalinter --install"),
cmd("./scripts/install-rust-proofs.sh"),
cmd("./scripts/install-bls-signatures.sh"),
}
cmds = append(cmds, hydrateParamCache()...)
// packages we need to install
pkgs := []string{
"github.com/alecthomas/gometalinter",
"github.com/docker/docker/api/types",
"github.com/docker/docker/api/types/container",
"github.com/docker/docker/client",
"github.com/docker/docker/pkg/stdcopy",
"github.com/ipfs/iptb",
"github.com/stretchr/testify",
"github.com/whyrusleeping/gx",
"github.com/whyrusleeping/gx-go",
"github.com/xeipuuv/gojsonschema",
"github.com/json-iterator/go",
"github.com/ipsn/go-secp256k1",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/prometheus/client_golang/prometheus",
"github.com/jstemmer/go-junit-report",
"github.com/pmezard/go-difflib/difflib",
}
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = gobuild.Default.GOPATH
}
gpbin := filepath.Join(gopath, "bin")
var gopathBinFound bool
for _, s := range strings.Split(os.Getenv("PATH"), ":") {
if s == gpbin {
gopathBinFound = true
}
}
if !gopathBinFound {
fmt.Println("'$GOPATH/bin' is not in your $PATH.")
fmt.Println("See https://golang.org/doc/code.html#GOPATH for more information.")
return
}
// if the package exists locally install it, else fetch it
for _, pkg := range pkgs {
pkgpath := filepath.Join(gopath, "src", pkg)
if _, err := os.Stat(pkgpath); os.IsNotExist(err) {
runCmd(cmd(fmt.Sprintf("go get %s", pkg)))
} else {
runCmd(cmd(fmt.Sprintf("go install %s", pkg)))
}
}
for _, c := range cmds {
runCmd(c)
}
}
// lint runs linting using gometalinter
func lint(packages ...string) {
if len(packages) == 0 {
packages = []string{"./..."}
}
log.Printf("Linting %s ...\n", strings.Join(packages, " "))
// Run fast linters batched together
configs := []string{
"gometalinter",
"--skip=sharness",
"--skip=vendor",
"--disable-all",
}
fastLinters := []string{
"--enable=vet",
"--enable=gofmt",
"--enable=misspell",
"--enable=goconst",
"--enable=golint",
"--enable=errcheck",
"--min-occurrences=6", // for goconst
}
runCmd(cmd(append(append(configs, fastLinters...), packages...)...))
slowLinters := []string{
"--deadline=10m",
"--enable=unconvert",
"--enable=staticcheck",
"--enable=varcheck",
"--enable=structcheck",
"--enable=deadcode",
}
runCmd(cmd(append(append(configs, slowLinters...), packages...)...))
}
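// build compiles the go-filecoin binary and the gengen, faucet and
// genesis-file-server tools, then generates the genesis fixtures.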
func build() {
buildFilecoin()
buildGengen()
buildFaucet()
buildGenesisFileServer()
generateGenesis()
}
func generateGenesis() {
log.Println("Generating genesis...")
runCmd(cmd([]string{
"./gengen/gengen",
"--keypath", "fixtures",
"--out-car", "fixtures/genesis.car",
"--out-json", "fixtures/gen.json",
"--config", "./fixtures/setup.json",
}...))
}
func buildFilecoin() {
log.Println("Building go-filecoin...")
commit := runCapture("git log -n 1 --format=%H")
runCmd(cmd([]string{
"go", "build",
"-ldflags", fmt.Sprintf("-X github.com/filecoin-project/go-filecoin/flags.Commit=%s", commit),
"-v", "-o", "go-filecoin", ".",
}...))
}
func buildGengen() {
log.Println("Building gengen utils...")
runCmd(cmd([]string{"go", "build", "-o", "./gengen/gengen", "./gengen"}...))
}
func buildFaucet() {
log.Println("Building faucet...")
runCmd(cmd([]string{"go", "build", "-o", "./tools/faucet/faucet", "./tools/faucet/"}...))
}
func buildGenesisFileServer() {
log.Println("Building genesis file server...")
runCmd(cmd([]string{"go", "build", "-o", "./tools/genesis-file-server/genesis-file-server", "./tools/genesis-file-server/"}...))
}
func install() {
log.Println("Installing...")
runCmd(cmd("go install"))
}
// test executes tests and passes along all additional arguments to `go test`.
func test(args ...string) {
log.Println("Testing...")
runCmd(cmd(fmt.Sprintf("go test -parallel 8 ./... %s", strings.Join(args, " "))))
}
func main() {
args := os.Args[1:]
if len(args) == 0 {
log.Fatalf("Missing command")
}
if !version.Check(runtime.Version()) {
log.Fatalf("Invalid go version: %s", runtime.Version())
}
cmd := args[0]
switch cmd {
case "deps":
deps()
case "smartdeps":
smartdeps()
case "lint":
lint(args[1:]...)
case "build-filecoin":
buildFilecoin()
case "build-gengen":
buildGengen()
case "generate-genesis":
generateGenesis()
case "build":
build()
case "test":
test(args[1:]...)
case "install":
install()
case "best":
build()
test(args[1:]...)
case "all":
deps()
lint()
build()
test(args[1:]...)
default:
log.Fatalf("Unknown command: %s\n", cmd)
}
}
| 1 | 16,948 | Howdy! You'll want to do this same thing (replace `hydrateParamCache` with `proofs/bin/paramcache` and then do the copy) in `smartdeps`, too. | filecoin-project-venus | go |
@@ -46,6 +46,16 @@ import (
// CmdSnaphotCreateOptions holds the options for snapshot
// create command
+var (
+ snapshotCreateCommandHelpText = `
+ usage: mayactl snapshot create --volname <vol> --snapname <snap>
+
+ this command creates a volume snapshot
+
+ note: the volume must exist before a snapshot can be created
+ `
+)
+
type CmdSnaphotCreateOptions struct {
volName string
snapName string | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"errors"
"fmt"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
/*func init() {
host := os.Getenv("MAPI_ADDR")
port := os.Getenv("MAPI_PORT")
defaultEndpoint := fmt.Sprintf("%s:%s", host, port)
if host == "" || port == "" {
fmt.Println("$MAPI_ADDR or $MAPI_ADDR are not set. Check if the maya-apiserver is running.")
defaultEndpoint = ""
}
cmd.PersistentFlags().StringVar(&APIServerEndpoint, "api-server-endpoint", defaultEndpoint, "IP endpoint of API server instance (required)")
cmd.PersistentFlags().StringVar(&logLevelRaw, "log-level", "WARNING", "logging level for logging/tracing output (valid values: CRITICAL,ERROR,WARNING,NOTICE,INFO,DEBUG,TRACE)")
cmd.MarkFlagRequired("api-server-endpoint")
// load the environment variables
//flags.SetFlagsFromEnv(cmd.PersistentFlags(), "MAYA")
}
*/
// CmdSnaphotCreateOptions holds the options for snapshot
// create command
type CmdSnaphotCreateOptions struct {
volName string
snapName string
}
// NewCmdSnapshotCreate creates a snapshot of OpenEBS Volume
func NewCmdSnapshotCreate() *cobra.Command {
options := CmdSnaphotCreateOptions{}
cmd := &cobra.Command{
Use: "create",
Short: "Creates a new Snapshot",
//Long: SnapshotCreateCommandHelpText,
Run: func(cmd *cobra.Command, args []string) {
util.CheckErr(options.Validate(cmd), util.Fatal)
util.CheckErr(options.RunSnapshotCreate(cmd), util.Fatal)
},
}
cmd.Flags().StringVarP(&options.volName, "volname", "n", options.volName,
"unique volume name.")
cmd.MarkPersistentFlagRequired("volname")
cmd.MarkPersistentFlagRequired("snapname")
cmd.Flags().StringVarP(&options.snapName, "snapname", "s", options.snapName,
"unique snapshot name")
return cmd
}
// Validate validates the flag values
func (c *CmdSnaphotCreateOptions) Validate(cmd *cobra.Command) error {
if c.volName == "" {
return errors.New("--volname is missing. Please specify an unique name")
}
if c.snapName == "" {
return errors.New("--snapname is missing. Please specify an unique name")
}
return nil
}
// RunSnapshotCreate does tasks related to mayaserver.
func (c *CmdSnaphotCreateOptions) RunSnapshotCreate(cmd *cobra.Command) error {
fmt.Println("Executing volume snapshot create...")
resp := mapiserver.CreateSnapshot(c.volName, c.snapName)
if resp != nil {
return fmt.Errorf("Snapshot create failed: %v", resp)
}
fmt.Printf("Volume snapshot Successfully Created:%v\n", c.volName)
return nil
}
| 1 | 8,216 | nit : Remove the extra lines | openebs-maya | go |
@@ -184,6 +184,7 @@ type Options struct {
ServerName string `json:"server_name"`
Host string `json:"addr"`
Port int `json:"port"`
+ DontListen bool `json:"dont_listen"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"` | 1 | // Copyright 2012-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nats-server/v2/conf"
)
var allowUnknownTopLevelField = int32(0)
// NoErrOnUnknownFields can be used to change the behavior the processing
// of a configuration file. By default, an error is reported if unknown
// fields are found. If `noError` is set to true, no error will be reported
// if top-level unknown fields are found.
func NoErrOnUnknownFields(noError bool) {
var val int32
if noError {
val = int32(1)
}
atomic.StoreInt32(&allowUnknownTopLevelField, val)
}
// Set of lower case hex-encoded sha256 of DER encoded SubjectPublicKeyInfo
type PinnedCertSet map[string]struct{}
// ClusterOpts are options for clusters.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type ClusterOpts struct {
Name string `json:"-"`
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
TLSCheckKnownURLs bool `json:"-"`
TLSPinnedCerts PinnedCertSet `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
// Not exported (used in tests)
resolver netResolver
// Snapshot of configured TLS options.
tlsConfigOpts *TLSConfigOpts
}
// GatewayOpts are options for gateways.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
TLSCheckKnownURLs bool `json:"-"`
TLSPinnedCerts PinnedCertSet `json:"-"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"` // config got renamed to reject_unknown_cluster
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
// Snapshot of configured TLS options.
tlsConfigOpts *TLSConfigOpts
}
// RemoteGatewayOpts are options for connecting to a remote gateway
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
tlsConfigOpts *TLSConfigOpts
}
// LeafNodeOpts are options for a given server to accept leaf node connections and/or connect to a remote cluster.
type LeafNodeOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
Account string `json:"-"`
Users []*User `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
TLSPinnedCerts PinnedCertSet `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ReconnectInterval time.Duration `json:"-"`
// For solicited connections to other clusters/superclusters.
Remotes []*RemoteLeafOpts `json:"remotes,omitempty"`
// Not exported, for tests.
resolver netResolver
dialTimeout time.Duration
connDelay time.Duration
// Snapshot of configured TLS options.
tlsConfigOpts *TLSConfigOpts
}
// RemoteLeafOpts are options for connecting to a remote server as a leaf node.
type RemoteLeafOpts struct {
LocalAccount string `json:"local_account,omitempty"`
NoRandomize bool `json:"-"`
URLs []*url.URL `json:"urls,omitempty"`
Credentials string `json:"-"`
TLS bool `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
Hub bool `json:"hub,omitempty"`
DenyImports []string `json:"-"`
DenyExports []string `json:"-"`
// When an URL has the "ws" (or "wss") scheme, then the server will initiate the
// connection as a websocket connection. By default, the websocket frames will be
// masked (as if this server was a websocket client to the remote server). The
// NoMasking option will change this behavior and will send umasked frames.
Websocket struct {
Compression bool `json:"-"`
NoMasking bool `json:"-"`
}
tlsConfigOpts *TLSConfigOpts
}
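// A minimal sketch of a solicited leafnode remote using the websocket scheme;
// the configuration keys shown are assumed and for illustration only:
//
//    leafnodes {
//        remotes: [
//            { url: "wss://hub.example.com:443/leafnode" }
//        ]
//    }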
// Options block for nats-server.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type Options struct {
ConfigFile string `json:"-"`
ServerName string `json:"server_name"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
TraceVerbose bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
NoSublistCache bool `json:"-"`
NoHeaderSupport bool `json:"-"`
DisableShortFirstPing bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
NoAuthUser string `json:"-"`
SystemAccount string `json:"-"`
NoSystemAccount bool `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPBasePath string `json:"http_base_path"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
LeafNode LeafNodeOpts `json:"leaf,omitempty"`
JetStream bool `json:"jetstream"`
JetStreamMaxMemory int64 `json:"-"`
JetStreamMaxStore int64 `json:"-"`
JetStreamDomain string `json:"-"`
JetStreamKey string `json:"-"`
StoreDir string `json:"-"`
Websocket WebsocketOpts `json:"-"`
MQTT MQTTOpts `json:"-"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
LogSizeLimit int64 `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSMap bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSPinnedCerts PinnedCertSet `json:"-"`
AllowNonTLS bool `json:"-"`
WriteDeadline time.Duration `json:"-"`
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
LameDuckGracePeriod time.Duration `json:"-"`
// MaxTracedMsgLen is the maximum printable length for traced messages.
MaxTracedMsgLen int `json:"-"`
// Operating a trusted NATS server
TrustedKeys []string `json:"-"`
TrustedOperators []*jwt.OperatorClaims `json:"-"`
AccountResolver AccountResolver `json:"-"`
AccountResolverTLSConfig *tls.Config `json:"-"`
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
// CheckConfig configuration file syntax test was successful and exit.
CheckConfig bool `json:"-"`
// ConnectErrorReports specifies the number of failed attempts
// at which point server should report the failure of an initial
// connection to a route, gateway or leaf node.
// See DEFAULT_CONNECT_ERROR_REPORTS for default value.
ConnectErrorReports int
// ReconnectErrorReports is similar to ConnectErrorReports except
// that this applies to reconnect events.
ReconnectErrorReports int
// Tags describing the server. They will be included in varz
// and used as a filter criteria for some system requests
Tags jwt.TagList `json:"-"`
// OCSPConfig enables OCSP Stapling in the server.
OCSPConfig *OCSPConfig
tlsConfigOpts *TLSConfigOpts
// private fields, used to know if bool options are explicitly
// defined in config and/or command line params.
inConfig map[string]bool
inCmdLine map[string]bool
// private fields for operator mode
operatorJWT []string
resolverPreloads map[string]string
// private fields, used for testing
gatewaysSolicitDelay time.Duration
routeProto int
}
// WebsocketOpts are options for websocket
type WebsocketOpts struct {
// The server will accept websocket client connections on this hostname/IP.
Host string
// The server will accept websocket client connections on this port.
Port int
// The host:port to advertise to websocket clients in the cluster.
Advertise string
// If no user name is provided when a client connects, will default to the
// matching user from the global list of users in `Options.Users`.
NoAuthUser string
// Name of the cookie, which if present in WebSocket upgrade headers,
// will be treated as JWT during CONNECT phase as long as
// "jwt" specified in the CONNECT options is missing or empty.
JWTCookie string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration of regular clients.
Username string
Password string
Token string
// Timeout for the authentication process.
AuthTimeout float64
// By default the server will enforce the use of TLS. If no TLS configuration
// is provided, you need to explicitly set NoTLS to true to allow the server
// to start without TLS configuration. Note that if a TLS configuration is
// present, this boolean is ignored and the server will run the Websocket
// server with that TLS configuration.
// Running without TLS is less secure since Websocket clients that use bearer
// tokens will send them in clear. So this should not be used in production.
NoTLS bool
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// When present, accepted client certificates (verify/verify_and_map) must be in this list
TLSPinnedCerts PinnedCertSet
// If true, the Origin header must match the request's host.
SameOrigin bool
// Only origins in this list will be accepted. If empty and
// SameOrigin is false, any origin is accepted.
AllowedOrigins []string
// If set to true, the server will negotiate with clients
// if compression can be used. If this is false, no compression
// will be used (both in server and clients) since it has to
// be negotiated between both endpoints
Compression bool
// Total time allowed for the server to read the client request
// and write the response back to the client. This include the
// time needed for the TLS Handshake.
HandshakeTimeout time.Duration
}
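// A minimal sketch of how these options might appear in a server configuration
// file; the snake_case keys are assumed to mirror the field names above and
// are shown for illustration only:
//
//    websocket {
//        host: "0.0.0.0"
//        port: 8080
//        no_tls: true
//        compression: true
//        handshake_timeout: "5s"
//    }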
// MQTTOpts are options for MQTT
type MQTTOpts struct {
// The server will accept MQTT client connections on this hostname/IP.
Host string
// The server will accept MQTT client connections on this port.
Port int
// If no user name is provided when a client connects, will default to the
// matching user from the global list of users in `Options.Users`.
NoAuthUser string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration of regular clients.
Username string
Password string
Token string
// Timeout for the authentication process.
AuthTimeout float64
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// Timeout for the TLS handshake
TLSTimeout float64
// Set of allowable certificates
TLSPinnedCerts PinnedCertSet
// AckWait is the amount of time after which a QoS 1 message sent to
// a client is redelivered as a DUPLICATE if the server has not
// received the PUBACK on the original Packet Identifier.
// The value has to be positive.
// Zero will cause the server to use the default value (30 seconds).
// Note that changes to this option is applied only to new MQTT subscriptions.
AckWait time.Duration
// MaxAckPending is the amount of QoS 1 messages the server can send to
// a subscription without receiving any PUBACK for those messages.
// The valid range is [0..65535].
// The total of subscriptions' MaxAckPending on a given session cannot
// exceed 65535. Attempting to create a subscription that would bring
// the total above the limit would result in the server returning 0x80
// in the SUBACK for this subscription.
// Due to how the NATS Server handles the MQTT "#" wildcard, each
// subscription ending with "#" will use 2 times the MaxAckPending value.
// Note that changes to this option are applied only to new subscriptions.
MaxAckPending uint16
}
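// A minimal sketch of how these options might appear in a server configuration
// file; the keys are assumed to mirror the field names above and are shown for
// illustration only:
//
//    mqtt {
//        host: "0.0.0.0"
//        port: 1883
//        ack_wait: "30s"
//        max_ack_pending: 100
//    }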
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
// FIXME(dlc) - clone leaf node stuff.
return clone
}
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
acc string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
TLSCheckKnownURLs bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
PinnedCerts PinnedCertSet
}
// OCSPConfig represents the options of OCSP stapling options.
type OCSPConfig struct {
// Mode defines the policy for OCSP stapling.
Mode OCSPMode
// OverrideURLs is the http URL endpoint used to get OCSP staples.
OverrideURLs []string
}
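// The "ocsp" item accepts either a plain boolean or a map, as handled by the
// parsing code further below; a sketch of both shapes:
//
//    ocsp: true
//
//    ocsp {
//        mode: "must"
//        url: "http://ocsp.example.com"
//    }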
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
verify_and_map: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue can be used to get the token and value from an item
// to be able to report the line number in case of an incorrect
// configuration.
// also stores the token in lastToken for use in convertPanicToError
func unwrapValue(v interface{}, lastToken *token) (token, interface{}) {
switch tk := v.(type) {
case token:
if lastToken != nil {
*lastToken = tk
}
return tk, tk.Value()
default:
return nil, v
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToErrorList(lastToken *token, errors *[]error) {
// only recover if an error can be stored
if errors == nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*errors = append(*errors, &configErr{*lastToken, fmt.Sprint(err)})
} else {
*errors = append(*errors, fmt.Errorf("encountered panic without a token %v", err))
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToError(lastToken *token, e *error) {
// only recover if an error can be stored
if e == nil || *e != nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*e = &configErr{*lastToken, fmt.Sprint(err)}
} else {
*e = fmt.Errorf("%v", err)
}
}
// configureSystemAccount configures a system account
// if present in the configuration.
func configureSystemAccount(o *Options, m map[string]interface{}) (retErr error) {
var lt token
defer convertPanicToError(&lt, &retErr)
configure := func(v interface{}) error {
tk, v := unwrapValue(v, &lt)
sa, ok := v.(string)
if !ok {
return &configErr{tk, "system account name must be a string"}
}
o.SystemAccount = sa
return nil
}
if v, ok := m["system_account"]; ok {
return configure(v)
} else if v, ok := m["system"]; ok {
return configure(v)
}
return nil
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
// First check whether a system account has been defined,
// as that is a condition for other features to be enabled.
if err := configureSystemAccount(o, m); err != nil {
errors = append(errors, err)
}
for k, v := range m {
o.processConfigFileLine(k, v, &errors, &warnings)
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error, warnings *[]error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "server_name":
o.ServerName = v.(string)
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
trackExplicitVal(o, &o.inConfig, "Debug", o.Debug)
case "trace":
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "trace_verbose":
o.TraceVerbose = v.(bool)
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "TraceVerbose", o.TraceVerbose)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "logtime":
o.Logtime = v.(bool)
trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime)
case "mappings", "maps":
gacc := NewAccount(globalAccountName)
o.Accounts = append(o.Accounts, gacc)
err := parseAccountMappings(tk, gacc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "disable_sublist_cache", "no_sublist_cache":
o.NoSublistCache = v.(bool)
case "accounts":
err := parseAccounts(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, "Cannot have a user/pass and token"}
*errors = append(*errors, err)
return
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, "Can not have a single user/pass and a users array"}
*errors = append(*errors, err)
return
}
if auth.token != "" {
err := &configErr{tk, "Can not have a token and a users array"}
*errors = append(*errors, err)
return
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "http_base_path":
o.HTTPBasePath = v.(string)
case "cluster":
err := parseCluster(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "gateway":
if err := parseGateway(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "leaf", "leafnodes":
err := parseLeafNodes(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "store_dir", "storedir":
// Check if JetStream configuration is also setting the storage directory.
if o.StoreDir != "" {
*errors = append(*errors, &configErr{tk, "Duplicate 'store_dir' configuration"})
return
}
o.StoreDir = v.(string)
case "jetstream":
err := parseJetStream(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "logfile_size_limit", "log_size_limit":
o.LogSizeLimit = v.(int64)
case "syslog":
o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxControlLine = int32(v.(int64))
case "max_payload":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxPayload = int32(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_traced_msg_len":
o.MaxTracedMsgLen = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = parseDuration("ping_interval", tk, v, errors, warnings)
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
return
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.TLSTimeout = tc.Timeout
o.TLSMap = tc.Map
o.TLSPinnedCerts = tc.PinnedCerts
// Need to keep track of path of the original TLS config
// and certs path for OCSP Stapling monitoring.
o.tlsConfigOpts = tc
case "ocsp":
switch vv := v.(type) {
case bool:
if vv {
// Default is Auto which honors Must Staple status request
// but does not shutdown the server in case it is revoked,
// letting the client choose whether to trust or not the server.
o.OCSPConfig = &OCSPConfig{Mode: OCSPModeAuto}
} else {
o.OCSPConfig = &OCSPConfig{Mode: OCSPModeNever}
}
case map[string]interface{}:
ocsp := &OCSPConfig{Mode: OCSPModeAuto}
for kk, kv := range vv {
_, v = unwrapValue(kv, &tk)
switch kk {
case "mode":
mode := v.(string)
switch {
case strings.EqualFold(mode, "always"):
ocsp.Mode = OCSPModeAlways
case strings.EqualFold(mode, "must"):
ocsp.Mode = OCSPModeMust
case strings.EqualFold(mode, "never"):
ocsp.Mode = OCSPModeNever
case strings.EqualFold(mode, "auto"):
ocsp.Mode = OCSPModeAuto
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported ocsp mode %T", mode)})
}
case "urls":
urls := v.([]string)
ocsp.OverrideURLs = urls
case "url":
url := v.(string)
ocsp.OverrideURLs = []string{url}
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported field %T", kk)})
return
}
}
o.OCSPConfig = ocsp
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported type %T", v)})
return
}
case "allow_non_tls":
o.AllowNonTLS = v.(bool)
case "write_deadline":
o.WriteDeadline = parseDuration("write_deadline", tk, v, errors, warnings)
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 30*time.Second {
err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
*errors = append(*errors, err)
return
}
o.LameDuckDuration = dur
case "lame_duck_grace_period":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_grace_period: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 0 {
err := &configErr{tk, "invalid lame_duck_grace_period, needs to be positive"}
*errors = append(*errors, err)
return
}
o.LameDuckGracePeriod = dur
case "operator", "operators", "roots", "root", "root_operators", "root_operator":
opFiles := []string{}
switch v := v.(type) {
case string:
opFiles = append(opFiles, v)
case []string:
opFiles = append(opFiles, v...)
default:
err := &configErr{tk, fmt.Sprintf("error parsing operators: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Assume for now these are file names, but they can also be the JWT itself inline.
o.TrustedOperators = make([]*jwt.OperatorClaims, 0, len(opFiles))
for _, fname := range opFiles {
theJWT, opc, err := readOperatorJWT(fname)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing operator JWT: %v", err)}
*errors = append(*errors, err)
continue
}
o.operatorJWT = append(o.operatorJWT, theJWT)
o.TrustedOperators = append(o.TrustedOperators, opc)
}
if len(o.TrustedOperators) == 1 {
// In case "resolver" is defined as well, it takes precedence
if o.AccountResolver == nil {
if accUrl, err := parseURL(o.TrustedOperators[0].AccountServerURL, "account resolver"); err == nil {
// nsc automatically appends "/accounts" during nsc push
o.AccountResolver, _ = NewURLAccResolver(accUrl.String() + "/accounts")
}
}
// In case "system_account" is defined as well, it takes precedence
if o.SystemAccount == "" {
o.SystemAccount = o.TrustedOperators[0].SystemAccount
}
}
case "resolver", "account_resolver", "accounts_resolver":
switch v := v.(type) {
case string:
// "resolver" takes precedence over value obtained from "operator".
// Clear so that parsing errors are not silently ignored.
o.AccountResolver = nil
memResolverRe := regexp.MustCompile(`(?i)(MEM|MEMORY)\s*`)
resolverRe := regexp.MustCompile(`(?i)(?:URL){1}(?:\({1}\s*"?([^\s"]*)"?\s*\){1})?\s*`)
if memResolverRe.MatchString(v) {
o.AccountResolver = &MemAccResolver{}
} else if items := resolverRe.FindStringSubmatch(v); len(items) == 2 {
url := items[1]
_, err := parseURL(url, "account resolver")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if ur, err := NewURLAccResolver(url); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
} else {
o.AccountResolver = ur
}
}
case map[string]interface{}:
del := false
dir := ""
dirType := ""
limit := int64(0)
ttl := time.Duration(0)
sync := time.Duration(0)
opts := []DirResOption{}
var err error
if v, ok := v["dir"]; ok {
_, v := unwrapValue(v, &lt)
dir = v.(string)
}
if v, ok := v["type"]; ok {
_, v := unwrapValue(v, &lt)
dirType = v.(string)
}
if v, ok := v["allow_delete"]; ok {
_, v := unwrapValue(v, &lt)
del = v.(bool)
}
if v, ok := v["limit"]; ok {
_, v := unwrapValue(v, &lt)
limit = v.(int64)
}
if v, ok := v["ttl"]; ok {
_, v := unwrapValue(v, &lt)
ttl, err = time.ParseDuration(v.(string))
}
if v, ok := v["interval"]; err == nil && ok {
_, v := unwrapValue(v, &lt)
sync, err = time.ParseDuration(v.(string))
}
if v, ok := v["timeout"]; err == nil && ok {
_, v := unwrapValue(v, &lt)
var to time.Duration
if to, err = time.ParseDuration(v.(string)); err == nil {
opts = append(opts, FetchTimeout(to))
}
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if dir == "" {
*errors = append(*errors, &configErr{tk, "dir has no value and needs to point to a directory"})
return
}
if info, _ := os.Stat(dir); info != nil && (!info.IsDir() || info.Mode().Perm()&(1<<(uint(7))) == 0) {
*errors = append(*errors, &configErr{tk, "dir needs to point to an accessible directory"})
return
}
var res AccountResolver
switch strings.ToUpper(dirType) {
case "CACHE":
if sync != 0 {
*errors = append(*errors, &configErr{tk, "CACHE does not accept sync"})
}
if del {
*errors = append(*errors, &configErr{tk, "CACHE does not accept allow_delete"})
}
res, err = NewCacheDirAccResolver(dir, limit, ttl, opts...)
case "FULL":
if ttl != 0 {
*errors = append(*errors, &configErr{tk, "FULL does not accept ttl"})
}
res, err = NewDirAccResolver(dir, limit, sync, del, opts...)
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.AccountResolver = res
default:
err := &configErr{tk, fmt.Sprintf("error parsing operator resolver, wrong type %T", v)}
*errors = append(*errors, err)
return
}
if o.AccountResolver == nil {
err := &configErr{tk, "error parsing account resolver, should be MEM or " +
" URL(\"url\") or a map containing dir and type state=[FULL|CACHE])"}
*errors = append(*errors, err)
}
case "resolver_tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
return
}
if o.AccountResolverTLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
case "resolver_preload":
mp, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "preload should be a map of account_public_key:account_jwt"}
*errors = append(*errors, err)
return
}
o.resolverPreloads = make(map[string]string)
for key, val := range mp {
			tk, val = unwrapValue(val, <)
if jwtstr, ok := val.(string); !ok {
err := &configErr{tk, "preload map value should be a string JWT"}
*errors = append(*errors, err)
continue
} else {
// Make sure this is a valid account JWT, that is a config error.
// We will warn of expirations, etc later.
if _, err := jwt.DecodeAccountClaims(jwtstr); err != nil {
err := &configErr{tk, "invalid account JWT"}
*errors = append(*errors, err)
continue
}
o.resolverPreloads[key] = jwtstr
}
}
case "no_auth_user":
o.NoAuthUser = v.(string)
case "system_account", "system":
// Already processed at the beginning so we just skip them
// to not treat them as unknown values.
return
case "no_system_account", "no_system", "no_sys_acc":
o.NoSystemAccount = v.(bool)
case "no_header_support":
o.NoHeaderSupport = v.(bool)
case "trusted", "trusted_keys":
switch v := v.(type) {
case string:
o.TrustedKeys = []string{v}
case []string:
o.TrustedKeys = v
case []interface{}:
keys := make([]string, 0, len(v))
for _, mv := range v {
				tk, mv = unwrapValue(mv, <)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
*errors = append(*errors, err)
continue
}
}
o.TrustedKeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
*errors = append(*errors, err)
}
}
case "connect_error_reports":
o.ConnectErrorReports = int(v.(int64))
case "reconnect_error_reports":
o.ReconnectErrorReports = int(v.(int64))
case "websocket", "ws":
if err := parseWebsocket(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "mqtt":
if err := parseMQTT(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "server_tags":
var err error
switch v := v.(type) {
case string:
o.Tags.Add(v)
case []string:
o.Tags.Add(v...)
case []interface{}:
for _, t := range v {
if t, ok := t.(token); ok {
if t, ok := t.Value().(string); ok {
o.Tags.Add(t)
continue
} else {
err = &configErr{tk, fmt.Sprintf("error parsing tags: unsupported type %T where string is expected", t)}
}
} else {
err = &configErr{tk, fmt.Sprintf("error parsing tags: unsupported type %T", t)}
}
break
}
default:
err = &configErr{tk, fmt.Sprintf("error parsing tags: unsupported type %T", v)}
}
if err != nil {
*errors = append(*errors, err)
return
}
default:
if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
func parseDuration(field string, tk token, v interface{}, errors *[]error, warnings *[]error) time.Duration {
if wd, ok := v.(string); ok {
if dur, err := time.ParseDuration(wd); err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing %s: %v", field, err)}
*errors = append(*errors, err)
return 0
} else {
return dur
}
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
err := &configWarningErr{
field: field,
configErr: configErr{
token: tk,
reason: field + " should be converted to a duration",
},
}
*warnings = append(*warnings, err)
return time.Duration(v.(int64)) * time.Second
}
}
func trackExplicitVal(opts *Options, pm *map[string]bool, name string, val bool) {
m := *pm
if m == nil {
m = make(map[string]bool)
*pm = m
}
m[name] = val
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse the listen option, which replaces host/net and port.
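// Accepted forms (illustrative values only) are a bare port or a "host:port"
// string, e.g.
//   listen: 4222
//   listen: "0.0.0.0:4222"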
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("could not parse port %q", port)
}
hp.host = host
default:
return nil, fmt.Errorf("expected port or host:port, got %T", vv)
}
return hp, nil
}
// parseCluster will parse the cluster config.
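// Illustrative sketch of a cluster block this parser accepts (name, hosts and
// values below are made up):
//   cluster {
//     name: "C1"
//     listen: "0.0.0.0:6222"
//     routes: ["nats-route://10.0.0.2:6222", "nats-route://10.0.0.3:6222"]
//     no_advertise: true
//     connect_retries: 5
//   }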
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
		tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "name":
opts.Cluster.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
err := &configErr{tk, "Cluster authorization does not allow multiple users"}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = tlsopts.Timeout
opts.Cluster.TLSMap = tlsopts.Map
opts.Cluster.TLSPinnedCerts = tlsopts.PinnedCerts
opts.Cluster.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs
opts.Cluster.tlsConfigOpts = tlsopts
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "Cluster.NoAdvertise", opts.Cluster.NoAdvertise)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
// Dynamic response permissions do not make sense here.
if perms.Response != nil {
err := &configErr{tk, "Cluster permissions do not support dynamic responses"}
*errors = append(*errors, err)
continue
}
		// This will possibly override permissions that were defined in the auth block.
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
func parseURLs(a []interface{}, typ string) (urls []*url.URL, errors []error) {
urls = make([]*url.URL, 0, len(a))
var lt token
	defer convertPanicToErrorList(<, &errors)
for _, u := range a {
		tk, u := unwrapValue(u, <)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
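// parseGateway parses the top-level gateway block. Illustrative sketch only
// (names and hosts below are made up):
//   gateway {
//     name: "A"
//     listen: "0.0.0.0:7222"
//     gateways: [
//       {name: "B", url: "nats://b.example.com:7222"}
//     ]
//     reject_unknown: true
//   }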
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
		tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = tlsopts.Timeout
o.Gateway.TLSMap = tlsopts.Map
o.Gateway.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs
o.Gateway.TLSPinnedCerts = tlsopts.PinnedCerts
o.Gateway.tlsConfigOpts = tlsopts
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown", "reject_unknown_cluster":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
var dynamicJSAccountLimits = &JetStreamAccountLimits{-1, -1, -1, -1}
// Parses jetstream account limits for an account. A simple boolean setup is allowed, and we will
// use dynamic account limits.
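// Illustrative forms only (limits below are made up):
//   jetstream: enabled
//   jetstream: {max_memory: 1073741824, max_store: 10737418240, max_streams: 16, max_consumers: 64}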
func parseJetStreamForAccount(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
	tk, v := unwrapValue(v, <)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
if vv {
acc.jsLimits = dynamicJSAccountLimits
}
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
acc.jsLimits = dynamicJSAccountLimits
case "disabled", "disable":
acc.jsLimits = nil
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
jsLimits := &JetStreamAccountLimits{-1, -1, -1, -1}
for mk, mv := range vv {
			tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "max_memory", "max_mem", "mem", "memory":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxMemory = int64(vv)
case "max_store", "max_file", "max_disk", "store", "disk":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStore = int64(vv)
case "max_streams", "streams":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStreams = int(vv)
case "max_consumers", "consumers":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxConsumers = int(vv)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
acc.jsLimits = jsLimits
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// Parse enablement of jetstream for a server.
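// Illustrative server-level jetstream block (path and sizes below are made up):
//   jetstream {
//     store_dir: "/var/lib/nats/jetstream"
//     max_memory_store: 1073741824
//     max_file_store: 10737418240
//     domain: "hub"
//   }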
func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
	tk, v := unwrapValue(v, <)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
opts.JetStream = v.(bool)
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
opts.JetStream = true
case "disabled", "disable":
opts.JetStream = false
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
doEnable := true
for mk, mv := range vv {
			tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "store", "store_dir", "storedir":
// StoreDir can be set at the top level as well so have to prevent ambiguous declarations.
if opts.StoreDir != "" {
return &configErr{tk, "Duplicate 'store_dir' configuration"}
}
opts.StoreDir = mv.(string)
case "max_memory_store", "max_mem_store", "max_mem":
opts.JetStreamMaxMemory = mv.(int64)
case "max_file_store", "max_file":
opts.JetStreamMaxStore = mv.(int64)
case "domain":
opts.JetStreamDomain = mv.(string)
case "enable", "enabled":
doEnable = mv.(bool)
case "key", "ek", "encryption_key":
opts.JetStreamKey = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
opts.JetStream = doEnable
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// parseLeafNodes will parse the leaf node config.
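// Illustrative leafnodes block (user names and values below are made up):
//   leafnodes {
//     listen: "0.0.0.0:7422"
//     authorization {
//       user: "leaf"
//       password: "s3cr3t"
//       timeout: 2
//     }
//     no_advertise: true
//   }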
func parseLeafNodes(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define a leafnode, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
		tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.Host = hp.host
opts.LeafNode.Port = hp.port
case "port":
opts.LeafNode.Port = int(mv.(int64))
case "host", "net":
opts.LeafNode.Host = mv.(string)
case "authorization":
auth, err := parseLeafAuthorization(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Username = auth.user
opts.LeafNode.Password = auth.pass
opts.LeafNode.AuthTimeout = auth.timeout
opts.LeafNode.Account = auth.acc
opts.LeafNode.Users = auth.users
// Validate user info config for leafnode authorization
if err := validateLeafNodeAuthOptions(opts); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
case "remotes":
// Parse the remote options here.
remotes, err := parseRemoteLeafNodes(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Remotes = remotes
case "reconnect", "reconnect_delay", "reconnect_interval":
opts.LeafNode.ReconnectInterval = time.Duration(int(mv.(int64))) * time.Second
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if opts.LeafNode.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.TLSTimeout = tc.Timeout
opts.LeafNode.TLSMap = tc.Map
opts.LeafNode.TLSPinnedCerts = tc.PinnedCerts
opts.LeafNode.tlsConfigOpts = tc
case "leafnode_advertise", "advertise":
opts.LeafNode.Advertise = mv.(string)
case "no_advertise":
opts.LeafNode.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "LeafNode.NoAdvertise", opts.LeafNode.NoAdvertise)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// This is the authorization parser adapter for the leafnode's
// authorization config.
func parseLeafAuthorization(v interface{}, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
	defer convertPanicToErrorList(<, errors)
	_, v = unwrapValue(v, <)
am = v.(map[string]interface{})
for mk, mv := range am {
		tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
users, err := parseLeafUsers(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
case "account":
auth.acc = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth, nil
}
// This is a trimmed down version of parseUsers that is adapted
// for the users possibly defined in the authorization{} section
// of leafnodes {}.
func parseLeafUsers(mv interface{}, errors *[]error, warnings *[]error) ([]*User, error) {
var (
tk token
lt token
users = []*User{}
)
	defer convertPanicToErrorList(<, errors)
	tk, mv = unwrapValue(mv, <)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
		tk, u = unwrapValue(u, <)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
user := &User{}
for k, v := range um {
			tk, v = unwrapValue(v, <)
switch strings.ToLower(k) {
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "account":
// We really want to save just the account name here, but
// the User object is *Account. So we create an account object
// but it won't be registered anywhere. The server will just
// use opts.LeafNode.Users[].Account.Name. Alternatively
// we need to create internal objects to store u/p and account
// name and have a server structure to hold that.
user.Account = NewAccount(v.(string))
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
users = append(users, user)
}
return users, nil
}
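// parseRemoteLeafNodes parses the leafnodes remotes array. Illustrative
// sketch only (hosts, account and file paths below are made up):
//   remotes: [
//     {
//       urls: ["nats-leaf://hub1.example.com:7422", "nats-leaf://hub2.example.com:7422"]
//       account: "APP"
//       credentials: "/etc/nats/app.creds"
//       deny_imports: ["internal.>"]
//     }
//   ]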
func parseRemoteLeafNodes(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteLeafOpts, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
ra, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected remotes field to be an array, got %T", v)}
}
remotes := make([]*RemoteLeafOpts, 0, len(ra))
for _, r := range ra {
		tk, r = unwrapValue(r, <)
		// Check it's a map/struct
rm, ok := r.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode entry to be a map/struct, got %v", r)})
continue
}
remote := &RemoteLeafOpts{}
for k, v := range rm {
			tk, v = unwrapValue(v, <)
switch strings.ToLower(k) {
case "no_randomize", "dont_randomize":
remote.NoRandomize = v.(bool)
case "url", "urls":
switch v := v.(type) {
case []interface{}, []string:
urls, errs := parseURLs(v.([]interface{}), "leafnode")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
remote.URLs = urls
case string:
url, err := parseURL(v, "leafnode")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.URLs = append(remote.URLs, url)
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode url to be an array or string, got %v", v)})
continue
}
case "account", "local":
remote.LocalAccount = v.(string)
case "creds", "credentials":
p, err := expandPath(v.(string))
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.Credentials = p
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if remote.TLSConfig, err = GenTLSConfig(tc); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
// If ca_file is defined, GenTLSConfig() sets TLSConfig.ClientCAs.
// Set RootCAs since this tls.Config is used when soliciting
// a connection (therefore behaves as a client).
remote.TLSConfig.RootCAs = remote.TLSConfig.ClientCAs
if tc.Timeout > 0 {
remote.TLSTimeout = tc.Timeout
} else {
remote.TLSTimeout = float64(DEFAULT_LEAF_TLS_TIMEOUT)
}
remote.tlsConfigOpts = tc
case "hub":
remote.Hub = v.(bool)
case "deny_imports", "deny_import":
subjects, err := parsePermSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyImports = subjects
case "deny_exports", "deny_export":
subjects, err := parsePermSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyExports = subjects
case "ws_compress", "ws_compression", "websocket_compress", "websocket_compression":
remote.Websocket.Compression = v.(bool)
case "ws_no_masking", "websocket_no_masking":
remote.Websocket.NoMasking = v.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
remotes = append(remotes, remote)
}
return remotes, nil
}
// Parse TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, *TLSConfigOpts, error) {
tc, err := parseTLS(tk, false)
if err != nil {
return nil, nil, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, nil, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so will mirror the rootCA to the
// clientCA pool.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc, nil
}
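// parseGateways parses the list of remote gateways. Illustrative sketch only
// (names and hosts below are made up):
//   gateways: [
//     {name: "B", urls: ["nats://b1.example.com:7222", "nats://b2.example.com:7222"]}
//     {name: "C", url: "nats://c.example.com:7222"}
//   ]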
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
		tk, g = unwrapValue(g, <)
		// Check it's a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
			tk, v = unwrapValue(v, <)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = tlsopts.Timeout
gateway.tlsConfigOpts = tlsopts
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
	// Both only affect interest registration.
// The parsing sets Import into Publish and Export into Subscribe, convert
// accordingly.
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
rt ServiceRespType
lat *serviceLatency
rthr time.Duration
tPos uint
}
type importStream struct {
acc *Account
an string
sub string
to string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
share bool
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
func parseAccountMapDest(v interface{}, tk token, errors *[]error, warnings *[]error) (*MapDest, *configErr) {
// These should be maps.
mv, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected an entry for the mapping destination"}
*errors = append(*errors, err)
return nil, err
}
mdest := &MapDest{}
var lt token
var sw bool
for k, v := range mv {
		tk, dmv := unwrapValue(v, <)
switch strings.ToLower(k) {
case "dest", "destination":
mdest.Subject = dmv.(string)
case "weight":
switch vv := dmv.(type) {
case string:
ws := vv
ws = strings.TrimSuffix(ws, "%")
weight, err := strconv.Atoi(ws)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Invalid weight %q for mapping destination", ws)}
*errors = append(*errors, err)
return nil, err
}
if weight > 100 || weight < 0 {
err := &configErr{tk, fmt.Sprintf("Invalid weight %d for mapping destination", weight)}
*errors = append(*errors, err)
return nil, err
}
mdest.Weight = uint8(weight)
sw = true
case int64:
weight := vv
if weight > 100 || weight < 0 {
err := &configErr{tk, fmt.Sprintf("Invalid weight %d for mapping destination", weight)}
*errors = append(*errors, err)
return nil, err
}
mdest.Weight = uint8(weight)
sw = true
default:
err := &configErr{tk, fmt.Sprintf("Unknown entry type for weight of %v\n", vv)}
*errors = append(*errors, err)
return nil, err
}
case "cluster":
mdest.Cluster = dmv.(string)
default:
err := &configErr{tk, fmt.Sprintf("Unknown field %q for mapping destination", k)}
*errors = append(*errors, err)
return nil, err
}
}
if !sw {
err := &configErr{tk, fmt.Sprintf("Missing weight for mapping destination %q", mdest.Subject)}
*errors = append(*errors, err)
return nil, err
}
return mdest, nil
}
// parseAccountMappings is called to parse account mappings.
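// Illustrative mappings block (subjects and weights below are made up):
//   mappings {
//     "orders.received": "orders.processed"
//     "metrics.ingest": [
//       {destination: "metrics.primary", weight: 80%}
//       {destination: "metrics.standby", weight: 20%}
//     ]
//   }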
func parseAccountMappings(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
am := v.(map[string]interface{})
for subj, mv := range am {
if !IsValidSubject(subj) {
err := &configErr{tk, fmt.Sprintf("Subject %q is not a valid subject", subj)}
*errors = append(*errors, err)
continue
}
		tk, v := unwrapValue(mv, <)
switch vv := v.(type) {
case string:
if err := acc.AddMapping(subj, v.(string)); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
case []interface{}:
var mappings []*MapDest
for _, mv := range v.([]interface{}) {
				tk, amv := unwrapValue(mv, <)
mdest, err := parseAccountMapDest(amv, tk, errors, warnings)
if err != nil {
continue
}
mappings = append(mappings, mdest)
}
// Now add them in..
if err := acc.AddWeightedMappings(subj, mappings...); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
case interface{}:
			tk, amv := unwrapValue(mv, <)
mdest, err := parseAccountMapDest(amv, tk, errors, warnings)
if err != nil {
continue
}
// Now add it in..
if err := acc.AddWeightedMappings(subj, mdest); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("Unknown type %T for mapping destination", vv)}
*errors = append(*errors, err)
continue
}
}
return nil
}
// parseAccounts will parse the different accounts syntax.
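// Illustrative accounts block (account names, users and subjects below are
// made up):
//   accounts {
//     APP {
//       users: [{user: "app", password: "s3cr3t"}]
//       exports: [{stream: "app.events.>"}]
//     }
//     MON {
//       users: [{user: "mon", password: "s3cr3t"}]
//       imports: [{stream: {account: "APP", subject: "app.events.>"}, prefix: "from_app"}]
//     }
//   }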
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
lt token
)
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
			tk, name := unwrapValue(n, <)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, NewAccount(ns))
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
// Track users across accounts, must be unique across
// accounts and nkeys vs users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
			tk, amv := unwrapValue(mv, <)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
var (
users []*User
nkeyUsr []*NkeyUser
usersTk token
)
acc := NewAccount(aname)
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
				tk, mv := unwrapValue(v, <)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "jetstream":
err := parseJetStreamForAccount(mv, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "users":
var err error
usersTk = tk
nkeyUsr, users, err = parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "default_permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
acc.defaultPerms = permissions
case "mappings", "maps":
err := parseAccountMappings(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
applyDefaultPermissions(users, nkeyUsr, acc.defaultPerms)
for _, u := range nkeyUsr {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeyUsr...)
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
}
}
lt = tk
// Bail already if there are previous errors.
if len(*errors) > 0 {
return nil
}
// Parse Imports and Exports here after all accounts defined.
	// Do exports first since they need to be defined for imports to succeed,
	// because we do permissions checks.
// Create a lookup map for accounts lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.addStreamExportWithAccountPos(stream.sub, accounts, stream.tPos); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.addServiceExportWithResponseAndAccountPos(service.sub, service.rt, accounts, service.tPos); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.rthr != 0 {
// Response threshold was set in options.
if err := service.acc.SetServiceExportResponseThreshold(service.sub, service.rthr); err != nil {
msg := fmt.Sprintf("Error adding service export response threshold for %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
if service.lat != nil {
			// System accounts are on by default, so just make sure we have not opted out.
if opts.NoSystemAccount {
msg := fmt.Sprintf("Error adding service latency sampling for %q: %v", service.sub, ErrNoSysAccount.Error())
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.TrackServiceExportWithSampling(service.sub, service.lat.subject, int(service.lat.sampling)); err != nil {
msg := fmt.Sprintf("Error adding service latency sampling for %q on subject %q: %v", service.sub, service.lat.subject, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if stream.pre != "" {
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
} else {
if err := stream.acc.AddMappedStreamImport(ta, stream.sub, stream.to); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.SetServiceImportSharing(ta, service.sub, service.share); err != nil {
msg := fmt.Sprintf("Error setting service import sharing %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
// Parse the account exports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
// This should be an array of objects/maps.
	tk, v := unwrapValue(v, <)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
// This should be an array of objects/maps.
	tk, v := unwrapValue(v, <)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
svcSubjects := map[string]*importService{}
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
if dup := svcSubjects[service.to]; dup != nil {
				tk, _ := unwrapValue(v, <)
err := &configErr{tk,
fmt.Sprintf("Duplicate service import subject %q, previously used in import for account %q, subject %q",
service.to, dup.an, dup.sub)}
*errors = append(*errors, err)
continue
}
svcSubjects[service.to] = service
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
var accountName, subject string
for mk, mv := range v {
		tk, mv := unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an export stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
rt ServiceRespType
rtSeen bool
rtToken token
lat *serviceLatency
threshSeen bool
thresh time.Duration
latToken token
lt token
accTokPos uint
)
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
		tk, mv := unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
if rtToken != nil {
err := &configErr{rtToken, "Detected response directive on non-service"}
*errors = append(*errors, err)
continue
}
if latToken != nil {
err := &configErr{latToken, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
if rtSeen {
curService.rt = rt
}
if lat != nil {
curService.lat = lat
}
if threshSeen {
curService.rthr = thresh
}
case "response", "response_type":
if rtSeen {
err := &configErr{tk, "Duplicate response type definition"}
*errors = append(*errors, err)
continue
}
rtSeen = true
rtToken = tk
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response type to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
switch strings.ToLower(mvs) {
case "single", "singleton":
rt = Singleton
case "stream":
rt = Streamed
case "chunk", "chunked":
rt = Chunked
default:
err := &configErr{tk, fmt.Sprintf("Unknown response type: %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rt = rt
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "threshold", "response_threshold", "response_max_time", "response_time":
if threshSeen {
err := &configErr{tk, "Duplicate response threshold detected"}
*errors = append(*errors, err)
continue
}
threshSeen = true
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %T", mv)}
*errors = append(*errors, err)
continue
}
var err error
thresh, err = time.ParseDuration(mvs)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rthr = thresh
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "accounts":
for _, iv := range mv.([]interface{}) {
				_, mv := unwrapValue(iv, <)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
case "latency":
latToken = tk
var err error
lat, err = parseServiceLatency(tk, mv)
if err != nil {
*errors = append(*errors, err)
continue
}
if curStream != nil {
err = &configErr{tk, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.lat = lat
}
case "account_token_position":
accTokPos = uint(mv.(int64))
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
if curStream != nil {
curStream.tPos = accTokPos
}
if curService != nil {
curService.tPos = accTokPos
}
return curStream, curService, nil
}
// parseServiceLatency returns a latency config block.
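// Illustrative forms only (subject and sampling below are made up):
//   latency: "latency.requests"
//   latency: {sampling: 25%, subject: "latency.requests"}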
func parseServiceLatency(root token, v interface{}) (l *serviceLatency, retErr error) {
var lt token
	defer convertPanicToError(<, &retErr)
if subject, ok := v.(string); ok {
return &serviceLatency{
subject: subject,
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}, nil
}
latency, ok := v.(map[string]interface{})
if !ok {
return nil, &configErr{token: root,
reason: fmt.Sprintf("Expected latency entry to be a map/struct or string, got %T", v)}
}
sl := serviceLatency{
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}
// Read sampling value.
if v, ok := latency["sampling"]; ok {
		tk, v := unwrapValue(v, <)
header := false
var sample int64
switch vv := v.(type) {
case int64:
// Sample is an int, like 50.
sample = vv
case string:
// Sample is a string, like "50%".
if strings.ToLower(strings.TrimSpace(vv)) == "headers" {
header = true
sample = 0
break
}
s := strings.TrimSuffix(vv, "%")
n, err := strconv.Atoi(s)
if err != nil {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Failed to parse latency sample: %v", err)}
}
sample = int64(n)
default:
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency sample to be a string or map/struct, got %T", v)}
}
if !header {
if sample < 1 || sample > 100 {
return nil, &configErr{token: tk,
reason: ErrBadSampling.Error()}
}
}
sl.sampling = int8(sample)
}
// Read subject value.
v, ok = latency["subject"]
if !ok {
return nil, &configErr{token: root,
reason: "Latency subject required, but missing"}
}
	tk, v := unwrapValue(v, <)
subject, ok := v.(string)
if !ok {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency subject to be a string, got %T", subject)}
}
sl.subject = subject
return &sl, nil
}
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, to: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
share bool
lt token
)
	defer convertPanicToErrorList(<, errors)
	tk, mv := unwrapValue(v, <)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
		tk, mv := unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, "Detected stream but already saw a service"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if to != "" {
curStream.to = to
}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
err := &configErr{tk, "Detected service but already saw a stream"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
} else {
curService.to = subject
}
curService.share = share
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
if curStream != nil {
curStream.to = to
if curStream.pre != "" {
err := &configErr{tk, "Stream import can not have a 'prefix' and a 'to' property"}
*errors = append(*errors, err)
continue
}
}
case "share":
share = mv.(bool)
if curService != nil {
curService.share = share
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Apply permission defaults to users/nkeyuser that don't have their own.
func applyDefaultPermissions(users []*User, nkeys []*NkeyUser, defaultP *Permissions) {
if defaultP == nil {
return
}
for _, user := range users {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
for _, user := range nkeys {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
}
// Helper function to parse Authorization configs.
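// Illustrative authorization block (user names and subjects below are made up):
//   authorization {
//     timeout: 2
//     users: [
//       {user: "admin", password: "s3cr3t", permissions: {publish: ">", subscribe: ">"}}
//       {user: "client", password: "s3cr3t"}
//     ]
//     default_permissions: {publish: "app.>", subscribe: ["app.>", "_INBOX.>"]}
//   }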
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
	defer convertPanicToErrorList(<, errors)
	_, v = unwrapValue(v, <)
am = v.(map[string]interface{})
for mk, mv := range am {
		tk, mv = unwrapValue(mv, <)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
applyDefaultPermissions(auth.users, auth.nkeys, auth.defaultPermissions)
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
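// Illustrative users array (names, passwords and connection types below are
// made up):
//   users: [
//     {user: "dashboard", password: "s3cr3t", allowed_connection_types: ["WEBSOCKET"]}
//     {user: "service", password: "s3cr3t", permissions: {subscribe: "requests.>"}}
//   ]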
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
lt token
keys []*NkeyUser
users = []*User{}
)
	defer convertPanicToErrorList(<, errors)
	tk, mv = unwrapValue(mv, <)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
		tk, u = unwrapValue(u, <)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
// Also needs to unwrap first
			tk, v = unwrapValue(v, <)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "allowed_connection_types", "connection_types", "clients":
				cts := parseAllowedConnectionTypes(tk, <, v, errors, warnings)
nkey.AllowedConnectionTypes = cts
user.AllowedConnectionTypes = cts
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
			// nkey takes precedence.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
// Check to make sure we have at least an nkey or username <password> defined.
if nkey.Nkey == "" && user.Username == "" {
return nil, nil, &configErr{tk, "User entry requires a user"}
} else if nkey.Nkey != "" {
// Make sure the nkey a proper public nkey for a user..
if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
return nil, nil, &configErr{tk, "Not a valid public nkey for a user"}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
return nil, nil, &configErr{tk, "Nkey users do not take usernames or passwords"}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
func parseAllowedConnectionTypes(tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) map[string]struct{} {
cts, err := parseStringArray("allowed connection types", tk, lt, mv, errors, warnings)
// If error, it has already been added to the `errors` array, simply return
if err != nil {
return nil
}
m, err := convertAllowedConnectionTypes(cts)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
}
return m
}
// Helper function to parse user/account permissions
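// Illustrative permissions block (subjects below are made up):
//   permissions {
//     publish: {allow: ["orders.>"], deny: ["orders.admin.>"]}
//     subscribe: "orders.replies.*"
//     allow_responses: {max: 1, expires: "2s"}
//   }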
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
lt token
p = &Permissions{}
)
	defer convertPanicToErrorList(<, errors)
	tk, mv = unwrapValue(mv, <)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
		tk, mv = unwrapValue(v, <)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
case "publish_allow_responses", "allow_responses":
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
// Try boolean first
responses, ok := mv.(bool)
if ok {
if responses {
p.Response = rp
}
} else {
p.Response = parseAllowResponses(v, errors, warnings)
}
if p.Response != nil {
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Top level parser for authorization configurations.
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse subject singletons and/or arrays
func parsePermSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
			tk, i := unwrapValue(i, <)
subject, ok := i.(string)
if !ok {
return nil, &configErr{tk, "Subject in permissions array cannot be cast to string"}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkPermSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse a ResponsePermission.
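// Illustrative forms only (values below are made up):
//   allow_responses: true
//   allow_responses: {max: 5, expires: "1m"}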
func parseAllowResponses(v interface{}, errors, warnings *[]error) *ResponsePermission {
var lt token
	defer convertPanicToErrorList(<, errors)
	tk, v := unwrapValue(v, <)
// Check if this is a map.
pm, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "error parsing response permissions, expected a boolean or a map"}
*errors = append(*errors, err)
return nil
}
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
for k, v := range pm {
		tk, v = unwrapValue(v, <)
switch strings.ToLower(k) {
case "max", "max_msgs", "max_messages", "max_responses":
max := int(v.(int64))
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if max != 0 {
rp.MaxMsgs = max
}
case "expires", "expiration", "ttl":
wd, ok := v.(string)
if ok {
ttl, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing expires: %v", err)}
*errors = append(*errors, err)
return nil
}
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if ttl != 0 {
rp.Expires = ttl
}
} else {
err := &configErr{tk, "error parsing expires, not a duration string"}
*errors = append(*errors, err)
return nil
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return rp
}
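// Illustrative sketch (editorial, not from the original source) of the map form
// accepted above; the values are placeholders:
//
//	allow_responses = { max: 5, expires: "1m" }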
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parsePermSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
tk, _ := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "allow":
subjects, err := parsePermSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parsePermSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
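// Illustrative sketch (editorial, not from the original source) of the new
// allow/deny style handled above; the subjects are placeholders:
//
//	publish = { allow: ["orders.>"], deny: ["orders.audit.>"] }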
// Helper function to validate permissions subjects.
func checkPermSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
// Check here if this is a queue group qualified subject.
elements := strings.Fields(s)
if len(elements) != 2 {
return fmt.Errorf("subject %q is not a valid subject", s)
} else if !IsValidSubject(elements[0]) {
return fmt.Errorf("subject %q is not a valid subject", elements[0])
}
}
}
return nil
}
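// Editorial note: the validation above also accepts queue-group qualified
// entries of the form "<subject> <queue>", e.g. "orders.* workers" (names are
// placeholders); only the subject part is checked for validity.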
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
func parseTLS(v interface{}, isClientCtx bool) (t *TLSConfigOpts, retErr error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
lt token
)
defer convertPanicToError(&lt, &retErr)
_, v = unwrapValue(v, &lt)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'cert_file' to be filename"}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'key_file' to be filename"}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'ca_file' to be filename"}
}
tc.CaFile = caFile
case "insecure":
insecure, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'insecure' to be a boolean"}
}
tc.Insecure = insecure
case "verify":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify' to be a boolean"}
}
tc.Verify = verify
case "verify_and_map":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_and_map' to be a boolean"}
}
if verify {
tc.Verify = verify
}
tc.Map = verify
case "verify_cert_and_check_known_urls":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_cert_and_check_known_urls' to be a boolean"}
}
if verify && isClientCtx {
return nil, &configErr{tk, "verify_cert_and_check_known_urls not supported in this context"}
}
if verify {
tc.Verify = verify
}
tc.TLSCheckKnownURLs = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'cipher_suites' cannot be empty"}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'curve_preferences' cannot be empty"}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
tc.Timeout = at
case "pinned_certs":
ra, ok := mv.([]interface{})
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'pinned_certs' to be a list of hex-encoded sha256 of DER encoded SubjectPublicKeyInfo"}
}
if len(ra) != 0 {
wl := PinnedCertSet{}
re := regexp.MustCompile("^[A-Fa-f0-9]{64}$")
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
entry := strings.ToLower(r.(string))
if !re.MatchString(entry) {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, 'pinned_certs' key %s does not look like hex-encoded sha256 of DER encoded SubjectPublicKeyInfo", entry)}
}
wl[entry] = struct{}{}
}
tc.PinnedCerts = wl
}
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
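// Illustrative sketch (editorial, not from the original source): a tls block
// covered by the cases above; paths and values are placeholders:
//
//	tls {
//	  cert_file: "./certs/server-cert.pem"
//	  key_file:  "./certs/server-key.pem"
//	  ca_file:   "./certs/ca.pem"
//	  verify:    true
//	  timeout:   2
//	}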
func parseSimpleAuth(v interface{}, errors *[]error, warnings *[]error) *authorization {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth
}
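// Illustrative sketch (editorial, not from the original source) of the simple
// authorization block parsed above (used by the websocket and mqtt sections);
// the credentials are placeholders:
//
//	authorization { user: "alice", password: "s3cr3t", timeout: 2 }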
func parseStringArray(fieldName string, tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) ([]string, error) {
switch mv := mv.(type) {
case string:
return []string{mv}, nil
case []interface{}:
strs := make([]string, 0, len(mv))
for _, val := range mv {
tk, val = unwrapValue(val, lt)
if str, ok := val.(string); ok {
strs = append(strs, str)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type in array %T", fieldName, val)}
*errors = append(*errors, err)
continue
}
}
return strs, nil
default:
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type %T", fieldName, mv)}
*errors = append(*errors, err)
return nil, err
}
}
func parseWebsocket(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected websocket to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.Host = hp.host
o.Websocket.Port = hp.port
case "port":
o.Websocket.Port = int(mv.(int64))
case "host", "net":
o.Websocket.Host = mv.(string)
case "advertise":
o.Websocket.Advertise = mv.(string)
case "no_tls":
o.Websocket.NoTLS = mv.(bool)
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.Websocket.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.TLSMap = tc.Map
o.Websocket.TLSPinnedCerts = tc.PinnedCerts
case "same_origin":
o.Websocket.SameOrigin = mv.(bool)
case "allowed_origins", "allowed_origin", "allow_origins", "allow_origin", "origins", "origin":
o.Websocket.AllowedOrigins, _ = parseStringArray("allowed origins", tk, &lt, mv, errors, warnings)
case "handshake_timeout":
ht := time.Duration(0)
switch mv := mv.(type) {
case int64:
ht = time.Duration(mv) * time.Second
case string:
var err error
ht, err = time.ParseDuration(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("error parsing handshake timeout: unsupported type %T", mv)}
*errors = append(*errors, err)
}
o.Websocket.HandshakeTimeout = ht
case "compress", "compression":
o.Websocket.Compression = mv.(bool)
case "authorization", "authentication":
auth := parseSimpleAuth(tk, errors, warnings)
o.Websocket.Username = auth.user
o.Websocket.Password = auth.pass
o.Websocket.Token = auth.token
o.Websocket.AuthTimeout = auth.timeout
case "jwt_cookie":
o.Websocket.JWTCookie = mv.(string)
case "no_auth_user":
o.Websocket.NoAuthUser = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
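// Illustrative sketch (editorial, not from the original source): a websocket
// block covered by the cases above; host, port and origins are placeholders:
//
//	websocket {
//	  listen: "0.0.0.0:8080"
//	  no_tls: true
//	  same_origin: true
//	  allowed_origins: ["https://example.com"]
//	  compression: true
//	  handshake_timeout: "5s"
//	}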
func parseMQTT(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected mqtt to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.MQTT.Host = hp.host
o.MQTT.Port = hp.port
case "port":
o.MQTT.Port = int(mv.(int64))
case "host", "net":
o.MQTT.Host = mv.(string)
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.MQTT.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.MQTT.TLSTimeout = tc.Timeout
o.MQTT.TLSMap = tc.Map
o.MQTT.TLSPinnedCerts = tc.PinnedCerts
case "authorization", "authentication":
auth := parseSimpleAuth(tk, errors, warnings)
o.MQTT.Username = auth.user
o.MQTT.Password = auth.pass
o.MQTT.Token = auth.token
o.MQTT.AuthTimeout = auth.timeout
case "no_auth_user":
o.MQTT.NoAuthUser = mv.(string)
case "ack_wait", "ackwait":
o.MQTT.AckWait = parseDuration("ack_wait", tk, mv, errors, warnings)
case "max_ack_pending", "max_pending", "max_inflight":
tmp := int(mv.(int64))
if tmp < 0 || tmp > 0xFFFF {
err := &configErr{tk, fmt.Sprintf("invalid value %v, should in [0..%d] range", tmp, 0xFFFF)}
*errors = append(*errors, err)
} else {
o.MQTT.MaxAckPending = uint16(tmp)
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
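// Illustrative sketch (editorial, not from the original source): an mqtt block
// covered by the cases above; the values are placeholders:
//
//	mqtt {
//	  listen: "0.0.0.0:1883"
//	  ack_wait: "30s"
//	  max_ack_pending: 100
//	}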
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Create the tls.Config from our options before including the certs.
// It will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
InsecureSkipVerify: tc.Insecure,
}
switch {
case tc.CertFile != "" && tc.KeyFile == "":
return nil, fmt.Errorf("missing 'key_file' in TLS configuration")
case tc.CertFile == "" && tc.KeyFile != "":
return nil, fmt.Errorf("missing 'cert_file' in TLS configuration")
case tc.CertFile != "" && tc.KeyFile != "":
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
config.Certificates = []tls.Certificate{cert}
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
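// Hedged usage sketch (editorial): the TLS sections elsewhere in this file feed
// the parsed options into GenTLSConfig roughly as follows (error handling
// elided):
//
//	tc, _ := parseTLS(tk, true)
//	cfg, err := GenTLSConfig(tc)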
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.HTTPBasePath != "" {
opts.HTTPBasePath = flagOpts.HTTPBasePath
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
// RoutesFromStr parses route URLs from a string
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
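// Hedged example (editorial): RoutesFromStr splits comma separated route URLs
// such as "nats-route://10.0.0.1:6222, nats-route://10.0.0.2:6222" (the hosts
// here are placeholders).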
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
for i := 0; i < len(interfaceAddr); i++ {
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
if net.ParseIP(interfaceIP.String()) != nil {
localIPs = append(localIPs, interfaceIP)
} else {
return nil, fmt.Errorf("Error parsing self referencing address: %v", err)
}
}
return localIPs, nil
}
func setBaselineOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = getDefaultAuthTimeout(opts.TLSConfig, opts.TLSTimeout)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = getDefaultAuthTimeout(opts.Cluster.TLSConfig, opts.Cluster.TLSTimeout)
}
}
if opts.LeafNode.Port != 0 {
if opts.LeafNode.Host == "" {
opts.LeafNode.Host = DEFAULT_HOST
}
if opts.LeafNode.TLSTimeout == 0 {
opts.LeafNode.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.LeafNode.AuthTimeout == 0 {
opts.LeafNode.AuthTimeout = getDefaultAuthTimeout(opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout)
}
}
// Set baseline connect port for remotes.
for _, r := range opts.LeafNode.Remotes {
if r != nil {
for _, u := range r.URLs {
if u.Port() == "" {
u.Host = net.JoinHostPort(u.Host, strconv.Itoa(DEFAULT_LEAFNODE_PORT))
}
}
}
}
// Set this regardless of opts.LeafNode.Port
if opts.LeafNode.ReconnectInterval == 0 {
opts.LeafNode.ReconnectInterval = DEFAULT_LEAF_NODE_RECONNECT
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.LameDuckGracePeriod == 0 {
opts.LameDuckGracePeriod = DEFAULT_LAME_DUCK_GRACE_PERIOD
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
if opts.Gateway.TLSTimeout == 0 {
opts.Gateway.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Gateway.AuthTimeout == 0 {
opts.Gateway.AuthTimeout = getDefaultAuthTimeout(opts.Gateway.TLSConfig, opts.Gateway.TLSTimeout)
}
}
if opts.ConnectErrorReports == 0 {
opts.ConnectErrorReports = DEFAULT_CONNECT_ERROR_REPORTS
}
if opts.ReconnectErrorReports == 0 {
opts.ReconnectErrorReports = DEFAULT_RECONNECT_ERROR_REPORTS
}
if opts.Websocket.Port != 0 {
if opts.Websocket.Host == "" {
opts.Websocket.Host = DEFAULT_HOST
}
}
if opts.MQTT.Port != 0 {
if opts.MQTT.Host == "" {
opts.MQTT.Host = DEFAULT_HOST
}
if opts.MQTT.TLSTimeout == 0 {
opts.MQTT.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
}
// JetStream
if opts.JetStreamMaxMemory == 0 {
opts.JetStreamMaxMemory = -1
}
if opts.JetStreamMaxStore == 0 {
opts.JetStreamMaxStore = -1
}
}
func getDefaultAuthTimeout(tls *tls.Config, tlsTimeout float64) float64 {
var authTimeout float64
if tls != nil {
authTimeout = tlsTimeout + 1.0
} else {
authTimeout = float64(AUTH_TIMEOUT / time.Second)
}
return authTimeout
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned configured
// based on the selected flags and/or configuration file.
// The command line options take precedence to the ones in the configuration file.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
dbgAndTrace bool
trcAndVerboseTrc bool
dbgAndTrcAndVerboseTrc bool
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.ServerName, "n", "", "Server name.")
fs.StringVar(&opts.ServerName, "name", "", "Server name.")
fs.StringVar(&opts.ServerName, "server_name", "", "Server name.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&trcAndVerboseTrc, "VV", false, "Enable Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.BoolVar(&dbgAndTrace, "DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&dbgAndTrcAndVerboseTrc, "DVV", false, "Enable Debug and Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&signal, "signal", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports).")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
fs.Int64Var(&opts.LogSizeLimit, "log_size_limit", 0, "Logfile size limit being auto-rotated")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method.")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port.")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries.")
fs.StringVar(&opts.Cluster.Name, "cluster_name", "", "Cluster Name, if not set one will be dynamically generated.")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
fs.IntVar(&opts.MaxTracedMsgLen, "max_traced_msg_len", 0, "Maximum printable length for traced messages. 0 for unlimited.")
fs.BoolVar(&opts.JetStream, "js", false, "Enable JetStream.")
fs.BoolVar(&opts.JetStream, "jetstream", false, "Enable JetStream.")
fs.StringVar(&opts.StoreDir, "sd", "", "Storage directory.")
fs.StringVar(&opts.StoreDir, "store_dir", "", "Storage directory.")
// The flags definition above set "default" values to some of the options.
// Calling Parse() here will override the default options with any value
// specified from the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then,
// call Parse() again to override the default+config with command line values.
// Calling Parse() before processing config file is necessary since configFile
// itself is a command line argument, and also Parse() is required in order
// to know if user wants simply to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Keep track of the boolean flags that were explicitly set with their value.
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "DVV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", dbgAndTrcAndVerboseTrc)
case "DV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrace)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrace)
case "D":
fallthrough
case "debug":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", FlagSnapshot.Debug)
case "VV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", trcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", trcAndVerboseTrc)
case "V":
fallthrough
case "trace":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", FlagSnapshot.Trace)
case "T":
fallthrough
case "logtime":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Logtime", FlagSnapshot.Logtime)
case "s":
fallthrough
case "syslog":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Syslog", FlagSnapshot.Syslog)
case "no_advertise":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Cluster.NoAdvertise", FlagSnapshot.Cluster.NoAdvertise)
}
})
// Process signal control.
if signal != _EMPTY_ {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != _EMPTY_ {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
if cerr, ok := err.(*processConfigErr); !ok || len(cerr.Errors()) != 0 {
return nil, err
}
// If we get here we only have warnings and can still continue
fmt.Fprint(os.Stderr, err)
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "VV":
opts.Trace, opts.TraceVerbose = trcAndVerboseTrc, trcAndVerboseTrc
case "DVV":
opts.Trace, opts.Debug, opts.TraceVerbose = dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc
case "DV":
// Check value to support -DV=false
opts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
func normalizeBasePath(p string) string {
if len(p) == 0 {
return "/"
}
// add leading slash
if p[0] != '/' {
p = "/" + p
}
return path.Clean(p)
}
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
tc.Ciphers = defaultCipherSuites()
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if the "cluster" (or "cluster_listen")
// flag has been explicitly set on the command line. If it is set to an empty
// string, it will clear the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
// -1 will fail url.Parse, so if we have -1, change it to
// 0, and then after parse, replace the port with -1 so we get
// automatic port allocation
wantsRandom := false
if strings.HasSuffix(opts.Cluster.ListenStr, ":-1") {
wantsRandom = true
cls := fmt.Sprintf("%s:0", opts.Cluster.ListenStr[0:len(opts.Cluster.ListenStr)-3])
opts.Cluster.ListenStr = cls
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
if wantsRandom {
p = "-1"
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
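// Hedged example (editorial) of the random-port handling above: passing
// "-cluster nats://0.0.0.0:-1" ends up with Cluster.Port set to -1 so the OS
// picks a free port.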
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = maybeReadPidFile(commandAndPid[1])
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
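// Hedged example (editorial) of the "<command>=<pid>" form handled above; the
// pid part may also be a pid file path (see maybeReadPidFile below):
//
//	nats-server --signal reopen=/var/run/nats-server.pid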
// maybeReadPidFile returns a PID or Windows service name obtained via the following method:
// 1. Try to open a file with path "pidStr" (absolute or relative).
// 2. If such a file exists and can be read, return its contents.
// 3. Otherwise, return the original "pidStr" string.
func maybeReadPidFile(pidStr string) string {
if b, err := ioutil.ReadFile(pidStr); err == nil {
return string(b)
}
return pidStr
}
func homeDir() (string, error) {
if runtime.GOOS == "windows" {
homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
userProfile := os.Getenv("USERPROFILE")
home := filepath.Join(homeDrive, homePath)
if homeDrive == "" || homePath == "" {
if userProfile == "" {
return "", errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
}
home = userProfile
}
return home, nil
}
home := os.Getenv("HOME")
if home == "" {
return "", errors.New("failed to get home dir, require $HOME")
}
return home, nil
}
func expandPath(p string) (string, error) {
p = os.ExpandEnv(p)
if !strings.HasPrefix(p, "~") {
return p, nil
}
home, err := homeDir()
if err != nil {
return "", err
}
return filepath.Join(home, p[1:]), nil
}
| 1 | 13,669 | Instead of adding a new option, I wonder if we could decide on a port that would disable listening. For instance, port set to 0 means that we use default port 4222. Setting to -1 means that we let OS pick a random free port. We could say anything negative lower than -1 (say -2) means disabled? You don't have to update the PR for that, just wondering if that would be better or not and let's see what others think. | nats-io-nats-server | go |
@@ -2511,6 +2511,9 @@ class ThriftRequestHandler(object):
if report.metadata:
return report.metadata.get("analyzer", {}).get("name")
+ if report.check_name.startswith('clang-diagnostic-'):
+ return 'clang-tidy'
+
# Processing PList files.
_, _, report_files = next(os.walk(report_dir), ([], [], []))
all_report_checkers = set() | 1 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Handle Thrift requests.
"""
import base64
from collections import defaultdict
from datetime import datetime, timedelta
import os
import re
import shlex
import tempfile
import time
import zipfile
import zlib
import sqlalchemy
from sqlalchemy.sql.expression import or_, and_, not_, func, \
asc, desc, union_all, select, bindparam, literal_column, cast
import codechecker_api_shared
from codechecker_api.codeCheckerDBAccess_v6 import constants, ttypes
from codechecker_api.codeCheckerDBAccess_v6.ttypes import BugPathPos, \
CheckerCount, CommentData, DiffType, Encoding, RunHistoryData, Order, \
ReportData, ReportDetails, ReviewData, RunData, RunFilter, \
RunReportCount, RunSortType, RunTagCount, SourceComponentData, \
SourceFileData, SortMode, SortType
from codechecker_common import plist_parser, skiplist_handler
from codechecker_common.source_code_comment_handler import \
SourceCodeCommentHandler, SpellException, contains_codechecker_comment
from codechecker_common import util
from codechecker_common.logger import get_logger
from codechecker_report_hash.hash import get_report_path_hash
from codechecker_web.shared import webserver_context
from codechecker_web.shared import convert
from codechecker_server.profiler import timeit
from .. import permissions
from ..database import db_cleanup
from ..database.config_db_model import Product
from ..database.database import conv, DBSession, escape_like
from ..database.run_db_model import \
AnalyzerStatistic, Report, ReviewStatus, File, Run, RunHistory, \
RunLock, Comment, BugPathEvent, BugReportPoint, \
FileContent, SourceComponent, ExtendedReportData
from ..metadata import MetadataInfoParser
from ..tmp import TemporaryDirectory
from .thrift_enum_helper import detection_status_enum, \
detection_status_str, review_status_enum, review_status_str, \
report_extended_data_type_enum
from . import store_handler
LOG = get_logger('server')
class CommentKindValue(object):
USER = 0
SYSTEM = 1
def comment_kind_from_thrift_type(kind):
""" Convert the given comment kind from Thrift type to Python enum. """
if kind == ttypes.CommentKind.USER:
return CommentKindValue.USER
elif kind == ttypes.CommentKind.SYSTEM:
return CommentKindValue.SYSTEM
def comment_kind_to_thrift_type(kind):
""" Convert the given comment kind from Python enum to Thrift type. """
if kind == CommentKindValue.USER:
return ttypes.CommentKind.USER
elif kind == CommentKindValue.SYSTEM:
return ttypes.CommentKind.SYSTEM
def verify_limit_range(limit):
"""Verify limit value for the queries.
Query limit should not be larger than the max allowed value.
Max is returned if the value is larger than max.
"""
max_query_limit = constants.MAX_QUERY_SIZE
if not limit:
return max_query_limit
if limit > max_query_limit:
LOG.warning('Query limit %d was larger than max query limit %d, '
'setting limit to %d',
limit,
max_query_limit,
max_query_limit)
limit = max_query_limit
return limit
def slugify(text):
"""
Removes and replaces special characters in a given text.
"""
# Remove characters other than word characters, whitespace, hyphens and slashes.
norm_text = re.sub(r'[^\w\s\-/]', '', text)
# Converts spaces and slashes to underscores.
norm_text = re.sub(r'([\s]+|[/]+)', '_', norm_text)
return norm_text
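# Hedged example (editorial) of the normalization above; the input is a
# placeholder: slugify("my run/2020 results") -> "my_run_2020_results"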
def exc_to_thrift_reqfail(func):
"""
Convert internal exceptions to RequestFailed exception
which can be sent back on the thrift connections.
"""
func_name = func.__name__
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
return res
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
# Convert SQLAlchemy exceptions.
msg = str(alchemy_ex)
LOG.warning("%s:\n%s", func_name, msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
except codechecker_api_shared.ttypes.RequestFailed as rf:
LOG.warning(rf.message)
raise
except Exception as ex:
msg = str(ex)
LOG.warning(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
return wrapper
def parse_codechecker_review_comment(source_file_name,
report_line,
checker_name):
"""Parse the CodeChecker review comments from a source file at a given
position. Returns an empty list if there are no comments.
"""
src_comment_data = []
with open(source_file_name,
encoding='utf-8',
errors='ignore') as sf:
if contains_codechecker_comment(sf):
sc_handler = SourceCodeCommentHandler()
try:
src_comment_data = sc_handler.filter_source_line_comments(
sf,
report_line,
checker_name)
except SpellException as ex:
LOG.warning(f"File {source_file_name} contains {ex}")
return src_comment_data
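# Hedged example (editorial, not from this file) of a source-code review
# comment the handler above is meant to pick up, placed above the report line:
#   // codechecker_suppress [all] reviewed, not a real issue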
def get_component_values(session, component_name):
"""
Get component values by component name and return a tuple where the
first item is a list of paths which should be skipped and the second
item is a list of paths which should be included.
E.g.:
+/a/b/x.cpp
+/a/b/y.cpp
-/a/b
On the above component value this function will return the following:
(['/a/b'], ['/a/b/x.cpp', '/a/b/y.cpp'])
"""
components = session.query(SourceComponent) \
.filter(SourceComponent.name.like(component_name)) \
.all()
skip = []
include = []
for component in components:
values = component.value.decode('utf-8').split('\n')
for value in values:
value = value.strip()
if not value:
continue
v = value[1:]
if value[0] == '+':
include.append(v)
elif value[0] == '-':
skip.append(v)
return skip, include
def process_report_filter(session, run_ids, report_filter, cmp_data=None):
"""
Process the new report filter.
"""
AND = []
cmp_filter_expr = process_cmp_data_filter(session, run_ids, report_filter,
cmp_data)
if cmp_filter_expr is not None:
AND.append(cmp_filter_expr)
if report_filter is None:
return and_(*AND)
if report_filter.filepath:
OR = [File.filepath.ilike(conv(fp))
for fp in report_filter.filepath]
AND.append(or_(*OR))
if report_filter.checkerMsg:
OR = [Report.checker_message.ilike(conv(cm))
for cm in report_filter.checkerMsg]
AND.append(or_(*OR))
if report_filter.checkerName:
OR = [Report.checker_id.ilike(conv(cn))
for cn in report_filter.checkerName]
AND.append(or_(*OR))
if report_filter.analyzerNames:
OR = [Report.analyzer_name.ilike(conv(an))
for an in report_filter.analyzerNames]
AND.append(or_(*OR))
if report_filter.runName:
OR = [Run.name.ilike(conv(rn))
for rn in report_filter.runName]
AND.append(or_(*OR))
if report_filter.reportHash:
OR = []
no_joker = []
for rh in report_filter.reportHash:
if '*' in rh:
OR.append(Report.bug_id.ilike(conv(rh)))
else:
no_joker.append(rh)
if no_joker:
OR.append(Report.bug_id.in_(no_joker))
AND.append(or_(*OR))
if report_filter.severity:
AND.append(Report.severity.in_(report_filter.severity))
if report_filter.detectionStatus:
dst = list(map(detection_status_str,
report_filter.detectionStatus))
AND.append(Report.detection_status.in_(dst))
if report_filter.reviewStatus:
OR = [ReviewStatus.status.in_(
list(map(review_status_str, report_filter.reviewStatus)))]
# No database entry for unreviewed reports
if (ttypes.ReviewStatus.UNREVIEWED in
report_filter.reviewStatus):
OR.append(ReviewStatus.status.is_(None))
AND.append(or_(*OR))
if report_filter.firstDetectionDate is not None:
date = datetime.fromtimestamp(report_filter.firstDetectionDate)
AND.append(Report.detected_at >= date)
if report_filter.fixDate is not None:
date = datetime.fromtimestamp(report_filter.fixDate)
AND.append(Report.detected_at < date)
if report_filter.date:
detected_at = report_filter.date.detected
if detected_at:
if detected_at.before:
detected_before = datetime.fromtimestamp(detected_at.before)
AND.append(Report.detected_at <= detected_before)
if detected_at.after:
detected_after = datetime.fromtimestamp(detected_at.after)
AND.append(Report.detected_at >= detected_after)
fixed_at = report_filter.date.fixed
if fixed_at:
if fixed_at.before:
fixed_before = datetime.fromtimestamp(fixed_at.before)
AND.append(Report.fixed_at <= fixed_before)
if fixed_at.after:
fixed_after = datetime.fromtimestamp(fixed_at.after)
AND.append(Report.fixed_at >= fixed_after)
if report_filter.runHistoryTag:
OR = []
for history_date in report_filter.runHistoryTag:
date = datetime.strptime(history_date,
'%Y-%m-%d %H:%M:%S.%f')
OR.append(and_(Report.detected_at <= date, or_(
Report.fixed_at.is_(None), Report.fixed_at >= date)))
AND.append(or_(*OR))
if report_filter.componentNames:
OR = []
for component_name in report_filter.componentNames:
skip, include = get_component_values(session, component_name)
if skip and include:
include_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in include])) \
.distinct()
skip_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in skip])) \
.distinct()
OR.append(or_(File.id.in_(
include_q.except_(skip_q))))
elif include:
include_q = [File.filepath.like(conv(fp)) for fp in include]
OR.append(or_(*include_q))
elif skip:
skip_q = [not_(File.filepath.like(conv(fp))) for fp in skip]
OR.append(and_(*skip_q))
AND.append(or_(*OR))
if report_filter.bugPathLength is not None:
min_path_length = report_filter.bugPathLength.min
if min_path_length is not None:
AND.append(Report.path_length >= min_path_length)
max_path_length = report_filter.bugPathLength.max
if max_path_length is not None:
AND.append(Report.path_length <= max_path_length)
filter_expr = and_(*AND)
return filter_expr
def get_open_reports_date_filter_query(tbl=Report, date=RunHistory.time):
""" Get open reports date filter. """
return and_(tbl.detected_at <= date,
or_(tbl.fixed_at.is_(None),
tbl.fixed_at > date))
def get_diff_bug_id_query(session, run_ids, tag_ids, open_reports_date):
""" Get bug id query for diff. """
q = session.query(Report.bug_id.distinct())
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
if tag_ids:
q = q.outerjoin(RunHistory,
RunHistory.run_id == Report.run_id) \
.filter(RunHistory.id.in_(tag_ids)) \
.filter(get_open_reports_date_filter_query())
if open_reports_date:
date = datetime.fromtimestamp(open_reports_date)
q = q.filter(get_open_reports_date_filter_query(Report, date))
return q
def get_diff_run_id_query(session, run_ids, tag_ids):
""" Get run id query for diff. """
q = session.query(Run.id.distinct())
if run_ids:
q = q.filter(Run.id.in_(run_ids))
if tag_ids:
q = q.outerjoin(RunHistory,
RunHistory.run_id == Run.id) \
.filter(RunHistory.id.in_(tag_ids))
return q
def is_cmp_data_empty(cmp_data):
""" True if the parameter is None or no filter fields are set. """
if not cmp_data:
return True
return not any([cmp_data.runIds,
cmp_data.runTag,
cmp_data.openReportsDate])
def process_cmp_data_filter(session, run_ids, report_filter, cmp_data):
""" Process compare data filter. """
base_tag_ids = report_filter.runTag if report_filter else None
base_open_reports_date = report_filter.openReportsDate \
if report_filter else None
query_base = get_diff_bug_id_query(session, run_ids, base_tag_ids,
base_open_reports_date)
query_base_runs = get_diff_run_id_query(session, run_ids, base_tag_ids)
if is_cmp_data_empty(cmp_data):
if not run_ids and (not report_filter or not report_filter.runTag):
return None
return and_(Report.bug_id.in_(query_base),
Report.run_id.in_(query_base_runs))
query_new = get_diff_bug_id_query(session, cmp_data.runIds,
cmp_data.runTag,
cmp_data.openReportsDate)
query_new_runs = get_diff_run_id_query(session, cmp_data.runIds,
cmp_data.runTag)
AND = []
if cmp_data.diffType == DiffType.NEW:
return and_(Report.bug_id.in_(query_new.except_(query_base)),
Report.run_id.in_(query_new_runs))
elif cmp_data.diffType == DiffType.RESOLVED:
return and_(Report.bug_id.in_(query_base.except_(query_new)),
Report.run_id.in_(query_base_runs))
elif cmp_data.diffType == DiffType.UNRESOLVED:
return and_(Report.bug_id.in_(query_base.intersect(query_new)),
Report.run_id.in_(query_new_runs))
else:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
'Unsupported diff type: ' + str(cmp_data.diffType))
return and_(*AND)
def process_run_history_filter(query, run_ids, run_history_filter):
"""
Process run history filter.
"""
if run_ids:
query = query.filter(RunHistory.run_id.in_(run_ids))
if run_history_filter and run_history_filter.tagNames:
OR = [RunHistory.version_tag.ilike('{0}'.format(conv(
escape_like(name, '\\'))), escape='\\') for
name in run_history_filter.tagNames]
query = query.filter(or_(*OR))
if run_history_filter and run_history_filter.tagIds:
query = query.filter(RunHistory.id.in_(run_history_filter.tagIds))
return query
def process_run_filter(session, query, run_filter):
"""
Process run filter.
"""
if run_filter is None:
return query
if run_filter.ids:
query = query.filter(Run.id.in_(run_filter.ids))
if run_filter.names:
if run_filter.exactMatch:
query = query.filter(Run.name.in_(run_filter.names))
else:
OR = [Run.name.ilike('{0}'.format(conv(
escape_like(name, '\\'))), escape='\\') for
name in run_filter.names]
query = query.filter(or_(*OR))
if run_filter.beforeTime:
date = datetime.fromtimestamp(run_filter.beforeTime)
query = query.filter(Run.date < date)
if run_filter.afterTime:
date = datetime.fromtimestamp(run_filter.afterTime)
query = query.filter(Run.date > date)
if run_filter.beforeRun:
run = session.query(Run.date) \
.filter(Run.name == run_filter.beforeRun) \
.one_or_none()
if run:
query = query.filter(Run.date < run.date)
if run_filter.afterRun:
run = session.query(Run.date) \
.filter(Run.name == run_filter.afterRun) \
.one_or_none()
if run:
query = query.filter(Run.date > run.date)
return query
def get_report_details(session, report_ids):
"""
Returns report details for the given report ids.
"""
details = {}
# Get bug path events.
bug_path_events = session.query(BugPathEvent, File.filepath) \
.filter(BugPathEvent.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == BugPathEvent.file_id) \
.order_by(BugPathEvent.report_id, BugPathEvent.order)
bug_events_list = defaultdict(list)
for event, file_path in bug_path_events:
report_id = event.report_id
event = bugpathevent_db_to_api(event)
event.filePath = file_path
bug_events_list[report_id].append(event)
# Get bug report points.
bug_report_points = session.query(BugReportPoint, File.filepath) \
.filter(BugReportPoint.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == BugReportPoint.file_id) \
.order_by(BugReportPoint.report_id, BugReportPoint.order)
bug_point_list = defaultdict(list)
for bug_point, file_path in bug_report_points:
report_id = bug_point.report_id
bug_point = bugreportpoint_db_to_api(bug_point)
bug_point.filePath = file_path
bug_point_list[report_id].append(bug_point)
# Get extended report data.
extended_data_list = defaultdict(list)
q = session.query(ExtendedReportData, File.filepath) \
.filter(ExtendedReportData.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == ExtendedReportData.file_id)
for data, file_path in q:
report_id = data.report_id
extended_data = extended_data_db_to_api(data)
extended_data.filePath = file_path
extended_data_list[report_id].append(extended_data)
for report_id in report_ids:
details[report_id] = \
ReportDetails(pathEvents=bug_events_list[report_id],
executionPath=bug_point_list[report_id],
extendedData=extended_data_list[report_id])
return details
def bugpathevent_db_to_api(bpe):
return ttypes.BugPathEvent(
startLine=bpe.line_begin,
startCol=bpe.col_begin,
endLine=bpe.line_end,
endCol=bpe.col_end,
msg=bpe.msg,
fileId=bpe.file_id)
def bugreportpoint_db_to_api(brp):
return BugPathPos(
startLine=brp.line_begin,
startCol=brp.col_begin,
endLine=brp.line_end,
endCol=brp.col_end,
fileId=brp.file_id)
def extended_data_db_to_api(erd):
return ttypes.ExtendedReportData(
type=report_extended_data_type_enum(erd.type),
startLine=erd.line_begin,
startCol=erd.col_begin,
endLine=erd.line_end,
endCol=erd.col_end,
message=erd.message,
fileId=erd.file_id)
def unzip(b64zip, output_dir):
"""
This function unzips the base64 encoded zip file. This zip is extracted
to a temporary directory and the ZIP is then deleted. The function returns
the size of the extracted zip file.
"""
with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
LOG.debug("Unzipping mass storage ZIP '%s' to '%s'...",
zip_file.name, output_dir)
zip_file.write(zlib.decompress(base64.b64decode(b64zip)))
with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zipf:
try:
zipf.extractall(output_dir)
return os.stat(zip_file.name).st_size
except Exception:
LOG.error("Failed to extract received ZIP.")
import traceback
traceback.print_exc()
raise
return 0
def create_review_data(review_status):
if review_status:
return ReviewData(status=review_status_enum(review_status.status),
comment=review_status.message.decode('utf-8'),
author=review_status.author,
date=str(review_status.date))
else:
return ReviewData(status=ttypes.ReviewStatus.UNREVIEWED)
def create_count_expression(report_filter):
if report_filter is not None and report_filter.isUnique:
return func.count(Report.bug_id.distinct())
else:
return func.count(literal_column('*'))
def apply_report_filter(q, filter_expression):
"""
Applies the given filter expression and joins the File and ReviewStatus
tables.
"""
q = q.outerjoin(File,
Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
return q
def get_sort_map(sort_types, is_unique=False):
# Get a list of sort_types which will be a nested ORDER BY.
sort_type_map = {
SortType.FILENAME: [(File.filepath, 'filepath'),
(Report.line, 'line')],
SortType.BUG_PATH_LENGTH: [(Report.path_length, 'bug_path_length')],
SortType.CHECKER_NAME: [(Report.checker_id, 'checker_id')],
SortType.SEVERITY: [(Report.severity, 'severity')],
SortType.REVIEW_STATUS: [(ReviewStatus.status, 'rw_status')],
SortType.DETECTION_STATUS: [(Report.detection_status, 'dt_status')]}
if is_unique:
sort_type_map[SortType.FILENAME] = [(File.filename, 'filename')]
sort_type_map[SortType.DETECTION_STATUS] = []
# Mapping the SQLAlchemy functions.
order_type_map = {Order.ASC: asc, Order.DESC: desc}
if sort_types is None:
sort_types = [SortMode(SortType.SEVERITY, Order.DESC)]
return sort_types, sort_type_map, order_type_map
def sort_results_query(query, sort_types, sort_type_map, order_type_map,
order_by_label=False):
"""
Helper method for __queryDiffResults and queryResults to apply sorting.
"""
for sort in sort_types:
sorttypes = sort_type_map.get(sort.type)
for sorttype in sorttypes:
order_type = order_type_map.get(sort.ord)
sort_col = sorttype[1] if order_by_label else sorttype[0]
query = query.order_by(order_type(sort_col))
return query
def filter_unresolved_reports(q):
"""
Filter reports which are unresolved.
Note: review status of these reports are not in skip_review_statuses
and detection statuses are not in skip_detection_statuses.
"""
skip_review_statuses = ['false_positive', 'intentional']
skip_detection_statuses = ['resolved', 'off', 'unavailable']
return q.filter(Report.detection_status.notin_(skip_detection_statuses)) \
.filter(or_(ReviewStatus.status.is_(None),
ReviewStatus.status.notin_(skip_review_statuses))) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id)
def check_remove_runs_lock(session, run_ids):
"""
Check if there is an existing lock on the given runs, which has not
expired yet. If so, the run cannot be deleted, as someone is assumed to
be storing into it.
"""
locks_expired_at = datetime.now() - timedelta(
seconds=db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE)
run_locks = session.query(RunLock.name) \
.filter(RunLock.locked_at >= locks_expired_at)
if run_ids:
run_locks = run_locks.filter(Run.id.in_(run_ids))
run_locks = run_locks \
.outerjoin(Run,
Run.name == RunLock.name) \
.all()
if run_locks:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"Can not remove results because the following runs "
"are locked: {0}".format(
', '.join([r[0] for r in run_locks])))
def sort_run_data_query(query, sort_mode):
"""
Sort run data query by the given sort type.
"""
# Sort by run date by default.
if not sort_mode:
return query.order_by(desc(Run.date))
order_type_map = {Order.ASC: asc, Order.DESC: desc}
order_type = order_type_map.get(sort_mode.ord)
if sort_mode.type == RunSortType.NAME:
query = query.order_by(order_type(Run.name))
elif sort_mode.type == RunSortType.UNRESOLVED_REPORTS:
query = query.order_by(order_type('report_count'))
elif sort_mode.type == RunSortType.DATE:
query = query.order_by(order_type(Run.date))
elif sort_mode.type == RunSortType.DURATION:
query = query.order_by(order_type(Run.duration))
elif sort_mode.type == RunSortType.CC_VERSION:
query = query.order_by(order_type(RunHistory.cc_version))
return query
def escape_whitespaces(s, whitespaces=None):
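    """
    Escape the given whitespace characters with a backslash so that
    messages built from the result can be split back into their parts
    later (see getComments()).
    """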
if not whitespaces:
whitespaces = [' ', '\n', '\t', '\r']
escaped = s
for w in whitespaces:
escaped = escaped.replace(w, '\\{0}'.format(w))
return escaped
class ThriftRequestHandler(object):
"""
Connect to database and handle thrift client requests.
"""
def __init__(self,
manager,
Session,
product,
auth_session,
config_database,
checker_md_docs,
checker_md_docs_map,
package_version,
context):
if not product:
raise ValueError("Cannot initialize request handler without "
"a product to serve.")
self.__manager = manager
self.__product = product
self.__auth_session = auth_session
self.__config_database = config_database
self.__checker_md_docs = checker_md_docs
self.__checker_doc_map = checker_md_docs_map
self.__package_version = package_version
self.__Session = Session
self.__context = context
self.__permission_args = {
'productID': product.id
}
def __get_username(self):
"""
        Returns the name of the currently logged in user.
"""
return self.__auth_session.user if self.__auth_session else "Anonymous"
def __require_permission(self, required):
"""
Helper method to raise an UNAUTHORIZED exception if the user does not
have any of the given permissions.
"""
with DBSession(self.__config_database) as session:
args = dict(self.__permission_args)
args['config_db_session'] = session
if not any([permissions.require_permission(
perm, args, self.__auth_session)
for perm in required]):
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
"You are not authorized to execute this action.")
return True
def __require_admin(self):
self.__require_permission([permissions.PRODUCT_ADMIN])
def __require_access(self):
self.__require_permission([permissions.PRODUCT_ACCESS])
def __require_store(self):
self.__require_permission([permissions.PRODUCT_STORE])
def __add_comment(self, bug_id, message, kind=CommentKindValue.USER):
""" Creates a new comment object. """
user = self.__get_username()
return Comment(bug_id,
user,
message.encode('utf-8'),
kind,
datetime.now())
@timeit
def getRunData(self, run_filter, limit, offset, sort_mode):
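        """
        Return RunData objects for the runs matching the given filter,
        including the unresolved report count, the report count for each
        detection status and the analyzer statistics of every run.
        """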
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
# Count the reports subquery.
stmt = session.query(Report.run_id,
func.count(Report.bug_id)
.label('report_count'))
stmt = filter_unresolved_reports(stmt) \
.group_by(Report.run_id).subquery()
tag_q = session.query(RunHistory.run_id,
func.max(RunHistory.id).label(
'run_history_id'),
func.max(RunHistory.time).label(
'run_history_time')) \
.group_by(RunHistory.run_id) \
.subquery()
q = session.query(Run.id,
Run.date,
Run.name,
Run.duration,
RunHistory.version_tag,
RunHistory.cc_version,
RunHistory.description,
stmt.c.report_count)
q = process_run_filter(session, q, run_filter)
q = q.outerjoin(stmt, Run.id == stmt.c.run_id) \
.outerjoin(tag_q, Run.id == tag_q.c.run_id) \
.outerjoin(RunHistory,
RunHistory.id == tag_q.c.run_history_id) \
.group_by(Run.id,
RunHistory.version_tag,
RunHistory.cc_version,
RunHistory.description,
stmt.c.report_count)
q = sort_run_data_query(q, sort_mode)
if limit:
q = q.limit(limit).offset(offset)
# Get the runs.
run_data = q.all()
# Set run ids filter by using the previous results.
if not run_filter:
run_filter = RunFilter()
run_filter.ids = [r[0] for r in run_data]
# Get report count for each detection statuses.
status_q = session.query(Report.run_id,
Report.detection_status,
func.count(Report.bug_id))
if run_filter and run_filter.ids is not None:
status_q = status_q.filter(Report.run_id.in_(run_filter.ids))
status_q = status_q.group_by(Report.run_id,
Report.detection_status)
status_sum = defaultdict(defaultdict)
for run_id, status, count in status_q:
status_sum[run_id][detection_status_enum(status)] = count
# Get analyzer statistics.
analyzer_statistics = defaultdict(lambda: defaultdict())
stat_q = session.query(AnalyzerStatistic,
Run.id)
if run_filter and run_filter.ids is not None:
stat_q = stat_q.filter(Run.id.in_(run_filter.ids))
stat_q = stat_q \
.outerjoin(RunHistory,
RunHistory.id == AnalyzerStatistic.run_history_id) \
.outerjoin(Run,
Run.id == RunHistory.run_id)
for stat, run_id in stat_q:
analyzer_statistics[run_id][stat.analyzer_type] = \
ttypes.AnalyzerStatistics(failed=stat.failed,
successful=stat.successful)
results = []
for run_id, run_date, run_name, duration, tag, cc_version, \
description, report_count \
in run_data:
if report_count is None:
report_count = 0
analyzer_stats = analyzer_statistics[run_id]
results.append(RunData(runId=run_id,
runDate=str(run_date),
name=run_name,
duration=duration,
resultCount=report_count,
detectionStatusCount=status_sum[run_id],
versionTag=tag,
codeCheckerVersion=cc_version,
analyzerStatistics=analyzer_stats,
description=description))
return results
@exc_to_thrift_reqfail
@timeit
def getRunCount(self, run_filter):
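        """ Return the number of runs matching the given run filter. """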
self.__require_access()
with DBSession(self.__Session) as session:
query = session.query(Run.id)
query = process_run_filter(session, query, run_filter)
return query.count()
def getCheckCommand(self, run_history_id, run_id):
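        """
        Return the analysis command of the given run history entry or the
        latest one of the given run. The command is stored zlib-compressed
        in the database.
        """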
self.__require_access()
if not run_history_id and not run_id:
return ""
with DBSession(self.__Session) as session:
query = session.query(RunHistory.check_command)
if run_history_id:
query = query.filter(RunHistory.id == run_history_id)
elif run_id:
query = query.filter(RunHistory.run_id == run_id) \
.order_by(RunHistory.time.desc()) \
.limit(1)
history = query.first()
if not history or not history[0]:
return ""
return zlib.decompress(history[0]).decode('utf-8')
@exc_to_thrift_reqfail
@timeit
def getRunHistory(self, run_ids, limit, offset, run_history_filter):
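        """
        Return the run history entries (storage events) of the given runs,
        newest first, applying the given filter and paging.
        """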
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
res = session.query(RunHistory)
res = process_run_history_filter(res, run_ids, run_history_filter)
res = res.order_by(RunHistory.time.desc())
if limit:
res = res.limit(limit).offset(offset)
results = []
for history in res:
analyzer_statistics = {}
for stat in history.analyzer_statistics:
analyzer_statistics[stat.analyzer_type] = \
ttypes.AnalyzerStatistics(
failed=stat.failed,
successful=stat.successful)
results.append(RunHistoryData(
id=history.id,
runId=history.run.id,
runName=history.run.name,
versionTag=history.version_tag,
user=history.user,
time=str(history.time),
codeCheckerVersion=history.cc_version,
analyzerStatistics=analyzer_statistics,
description=history.description))
return results
@exc_to_thrift_reqfail
@timeit
def getRunHistoryCount(self, run_ids, run_history_filter):
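        """ Return the number of run history entries matching the filter. """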
self.__require_access()
with DBSession(self.__Session) as session:
query = session.query(RunHistory.id)
query = process_run_history_filter(query,
run_ids,
run_history_filter)
return query.count()
@exc_to_thrift_reqfail
@timeit
def getReport(self, reportId):
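        """ Return the ReportData belonging to the given report id. """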
self.__require_access()
with DBSession(self.__Session) as session:
result = session.query(Report,
File,
ReviewStatus) \
.filter(Report.id == reportId) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.limit(1).one_or_none()
if not result:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"Report " + str(reportId) + " not found!")
report, source_file, review_status = result
return ReportData(
runId=report.run_id,
bugHash=report.bug_id,
checkedFile=source_file.filepath,
checkerMsg=report.checker_message,
reportId=report.id,
fileId=source_file.id,
line=report.line,
column=report.column,
checkerId=report.checker_id,
severity=report.severity,
reviewData=create_review_data(review_status),
detectionStatus=detection_status_enum(report.detection_status),
detectedAt=str(report.detected_at),
fixedAt=str(report.fixed_at) if report.fixed_at else None)
@exc_to_thrift_reqfail
@timeit
def getDiffResultsHash(self, run_ids, report_hashes, diff_type,
skip_detection_statuses):
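        """
        Return bug hashes of the diff between the given runs (all runs if
        no run id is given) and the given report hash list:
          - NEW: hashes from report_hashes which do not appear in the runs
            (ignoring reports with a skipped detection status),
          - RESOLVED: hashes present in the runs but not in report_hashes,
          - UNRESOLVED: hashes present in both, excluding reports with a
            skipped detection status.
        """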
self.__require_access()
if not skip_detection_statuses:
skip_detection_statuses = [ttypes.DetectionStatus.RESOLVED,
ttypes.DetectionStatus.OFF,
ttypes.DetectionStatus.UNAVAILABLE]
# Convert statuses to string.
skip_statuses_str = [detection_status_str(status)
for status in skip_detection_statuses]
with DBSession(self.__Session) as session:
if diff_type == DiffType.NEW:
# In postgresql we can select multiple rows filled with
# constants by using `unnest` function. In sqlite we have to
# use multiple UNION ALL.
if not report_hashes:
return []
base_hashes = session.query(Report.bug_id.label('bug_id')) \
.outerjoin(File, Report.file_id == File.id) \
.filter(Report.detection_status.notin_(skip_statuses_str))
if run_ids:
base_hashes = \
base_hashes.filter(Report.run_id.in_(run_ids))
if self.__product.driver_name == 'postgresql':
new_hashes = select([func.unnest(report_hashes)
.label('bug_id')]) \
.except_(base_hashes).alias('new_bugs')
return [res[0] for res in session.query(new_hashes)]
else:
# The maximum number of compound select in sqlite is 500
# by default. We increased SQLITE_MAX_COMPOUND_SELECT
# limit but when the number of compound select was larger
# than 8435 sqlite threw a `Segmentation fault` error.
# For this reason we create queries with chunks.
new_hashes = []
chunk_size = 500
for chunk in [report_hashes[i:i + chunk_size] for
i in range(0, len(report_hashes),
chunk_size)]:
new_hashes_query = union_all(*[
select([bindparam('bug_id' + str(i), h)
.label('bug_id')])
for i, h in enumerate(chunk)])
q = select([new_hashes_query]).except_(base_hashes)
new_hashes.extend([res[0] for res in session.query(q)])
return new_hashes
elif diff_type == DiffType.RESOLVED:
results = session.query(Report.bug_id) \
.filter(Report.bug_id.notin_(report_hashes))
if run_ids:
results = results.filter(Report.run_id.in_(run_ids))
return [res[0] for res in results]
elif diff_type == DiffType.UNRESOLVED:
results = session.query(Report.bug_id) \
.filter(Report.bug_id.in_(report_hashes)) \
.filter(Report.detection_status.notin_(skip_statuses_str))
if run_ids:
results = results.filter(Report.run_id.in_(run_ids))
return [res[0] for res in results]
else:
return []
@exc_to_thrift_reqfail
@timeit
def getRunResults(self, run_ids, limit, offset, sort_types,
report_filter, cmp_data, get_details):
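        """
        Return the list of reports of the given runs matching the report
        filter and compare data, applying sorting and paging. If the filter
        requests unique results, one representative report is returned for
        every bug hash.
        """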
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
results = []
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
sort_types, sort_type_map, order_type_map = \
get_sort_map(sort_types, True)
selects = [func.max(Report.id).label('id')]
for sort in sort_types:
sorttypes = sort_type_map.get(sort.type)
for sorttype in sorttypes:
if sorttype[0] != 'bug_path_length':
selects.append(func.max(sorttype[0])
.label(sorttype[1]))
unique_reports = session.query(*selects)
unique_reports = apply_report_filter(unique_reports,
filter_expression)
unique_reports = unique_reports \
.group_by(Report.bug_id) \
.subquery()
# Sort the results
sorted_reports = \
session.query(unique_reports.c.id)
sorted_reports = sort_results_query(sorted_reports,
sort_types,
sort_type_map,
order_type_map,
True)
sorted_reports = sorted_reports \
.limit(limit).offset(offset).subquery()
q = session.query(Report.id, Report.bug_id,
Report.checker_message, Report.checker_id,
Report.severity, Report.detected_at,
Report.fixed_at, ReviewStatus,
File.filename, File.filepath,
Report.path_length, Report.analyzer_name) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.outerjoin(sorted_reports,
sorted_reports.c.id == Report.id) \
.filter(sorted_reports.c.id.isnot(None))
                # We have to sort the results again because the ORDER BY of
                # the subquery is not preserved by the JOIN.
q = sort_results_query(q,
sort_types,
sort_type_map,
order_type_map)
query_result = q.all()
# Get report details if it is required.
report_details = {}
if get_details:
report_ids = [r[0] for r in query_result]
report_details = get_report_details(session, report_ids)
for report_id, bug_id, checker_msg, checker, severity, \
detected_at, fixed_at, status, filename, path, \
bug_path_len, analyzer_name in query_result:
review_data = create_review_data(status)
results.append(
ReportData(bugHash=bug_id,
checkedFile=filename,
checkerMsg=checker_msg,
checkerId=checker,
severity=severity,
reviewData=review_data,
detectedAt=str(detected_at),
fixedAt=str(fixed_at),
bugPathLength=bug_path_len,
details=report_details.get(report_id),
analyzerName=analyzer_name))
else:
q = session.query(Report.run_id, Report.id, Report.file_id,
Report.line, Report.column,
Report.detection_status, Report.bug_id,
Report.checker_message, Report.checker_id,
Report.severity, Report.detected_at,
Report.fixed_at, ReviewStatus,
File.filepath,
Report.path_length, Report.analyzer_name) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
sort_types, sort_type_map, order_type_map = \
get_sort_map(sort_types)
q = sort_results_query(q, sort_types, sort_type_map,
order_type_map)
q = q.limit(limit).offset(offset)
query_result = q.all()
# Get report details if it is required.
report_details = {}
if get_details:
report_ids = [r[1] for r in query_result]
report_details = get_report_details(session, report_ids)
for run_id, report_id, file_id, line, column, d_status, \
bug_id, checker_msg, checker, severity, detected_at,\
fixed_at, r_status, path, bug_path_len, analyzer_name \
in query_result:
review_data = create_review_data(r_status)
results.append(
ReportData(runId=run_id,
bugHash=bug_id,
checkedFile=path,
checkerMsg=checker_msg,
reportId=report_id,
fileId=file_id,
line=line,
column=column,
checkerId=checker,
severity=severity,
reviewData=review_data,
detectionStatus=detection_status_enum(
d_status),
detectedAt=str(detected_at),
fixedAt=str(fixed_at) if fixed_at else None,
bugPathLength=bug_path_len,
details=report_details.get(report_id),
analyzerName=analyzer_name))
return results
@timeit
def getRunReportCounts(self, run_ids, report_filter, limit, offset):
"""
Count the results separately for multiple runs.
If an empty run id list is provided the report
counts will be calculated for all of the available runs.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = []
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter)
count_expr = create_count_expression(report_filter)
q = session.query(Run.id,
Run.name,
count_expr) \
.select_from(Report)
q = q.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.outerjoin(Run,
Report.run_id == Run.id) \
.filter(filter_expression) \
.order_by(Run.name) \
.group_by(Run.id)
if limit:
q = q.limit(limit).offset(offset)
for run_id, run_name, count in q:
report_count = RunReportCount(runId=run_id,
name=run_name,
reportCount=count)
results.append(report_count)
return results
@exc_to_thrift_reqfail
@timeit
def getRunResultCount(self, run_ids, report_filter, cmp_data):
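        """
        Return the number of reports (unique bug hashes if the filter
        requests unique results) matching the given filter.
        """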
self.__require_access()
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
q = session.query(Report.bug_id)
q = apply_report_filter(q, filter_expression)
if report_filter is not None and report_filter.isUnique:
q = q.group_by(Report.bug_id)
report_count = q.count()
if report_count is None:
report_count = 0
return report_count
@staticmethod
@timeit
def __construct_bug_item_list(session, report_id, item_type):
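        """
        Return (bug item, file path) pairs of the given item type which
        belong to the given report, ordered by their position in the path.
        """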
q = session.query(item_type) \
.filter(item_type.report_id == report_id) \
.order_by(item_type.order)
bug_items = []
for event in q:
f = session.query(File).get(event.file_id)
bug_items.append((event, f.filepath))
return bug_items
@exc_to_thrift_reqfail
@timeit
def getReportDetails(self, reportId):
"""
Parameters:
- reportId
"""
self.__require_access()
with DBSession(self.__Session) as session:
return get_report_details(session, [reportId])[reportId]
def _setReviewStatus(self, report_id, status, message, session):
"""
This function sets the review status of the given report. This is the
implementation of changeReviewStatus(), but it is also extended with
a session parameter which represents a database transaction. This is
needed because during storage a specific session object has to be used.
"""
report = session.query(Report).get(report_id)
if report:
review_status = session.query(ReviewStatus).get(report.bug_id)
if review_status is None:
review_status = ReviewStatus()
review_status.bug_hash = report.bug_id
user = self.__get_username()
old_status = review_status.status if review_status.status \
else review_status_str(ttypes.ReviewStatus.UNREVIEWED)
old_msg = review_status.message.decode('utf-8') \
if review_status.message else None
review_status.status = review_status_str(status)
review_status.author = user
review_status.message = message.encode('utf8') if message else b''
review_status.date = datetime.now()
session.add(review_status)
# Create a system comment if the review status or the message is
# changed.
if old_status != review_status.status or old_msg != message:
old_review_status = escape_whitespaces(old_status.capitalize())
new_review_status = \
escape_whitespaces(review_status.status.capitalize())
if message:
system_comment_msg = \
'rev_st_changed_msg {0} {1} {2}'.format(
old_review_status, new_review_status,
escape_whitespaces(message))
else:
system_comment_msg = 'rev_st_changed {0} {1}'.format(
old_review_status, new_review_status)
system_comment = self.__add_comment(review_status.bug_hash,
system_comment_msg,
CommentKindValue.SYSTEM)
session.add(system_comment)
session.flush()
return True
else:
msg = "No report found in the database."
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def isReviewStatusChangeDisabled(self):
"""
Return True if review status change is disabled.
"""
with DBSession(self.__config_database) as session:
product = session.query(Product).get(self.__product.id)
return product.is_review_status_change_disabled
@exc_to_thrift_reqfail
@timeit
def changeReviewStatus(self, report_id, status, message):
"""
Change review status of the bug by report id.
"""
self.__require_permission([permissions.PRODUCT_ACCESS,
permissions.PRODUCT_STORE])
if self.isReviewStatusChangeDisabled():
msg = "Review status change is disabled!"
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
with DBSession(self.__Session) as session:
res = self._setReviewStatus(report_id, status, message, session)
session.commit()
LOG.info("Review status of report '%s' was changed to '%s' by %s.",
report_id, review_status_str(status),
self.__get_username())
return res
@exc_to_thrift_reqfail
@timeit
def getComments(self, report_id):
"""
Return the list of comments for the given bug.
"""
self.__require_access()
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
result = []
comments = session.query(Comment) \
.filter(Comment.bug_hash == report.bug_id) \
.order_by(Comment.created_at.desc()) \
.all()
context = webserver_context.get_context()
for comment in comments:
message = comment.message.decode('utf-8')
sys_comment = comment_kind_from_thrift_type(
ttypes.CommentKind.SYSTEM)
if comment.kind == sys_comment:
elements = shlex.split(message)
system_comment = context.system_comment_map.get(
elements[0])
if system_comment:
for idx, value in enumerate(elements[1:]):
system_comment = system_comment.replace(
'{' + str(idx) + '}', value)
message = system_comment
result.append(CommentData(
comment.id,
comment.author,
message,
str(comment.created_at),
comment_kind_to_thrift_type(comment.kind)))
return result
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getCommentCount(self, report_id):
"""
Return the number of comments for the given bug.
"""
self.__require_access()
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
commentCount = session.query(Comment) \
.filter(Comment.bug_hash == report.bug_id) \
.count()
if commentCount is None:
commentCount = 0
return commentCount
@exc_to_thrift_reqfail
@timeit
def addComment(self, report_id, comment_data):
""" Add new comment for the given bug. """
self.__require_access()
if not comment_data.message.strip():
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'The comment message can not be empty!')
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
comment = self.__add_comment(report.bug_id,
comment_data.message)
session.add(comment)
session.commit()
return True
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def updateComment(self, comment_id, content):
"""
        Update the given comment message with new content. We allow
        comments to be updated by their original author only, except for
        Anonymous comments that can be updated by anybody.
"""
self.__require_access()
if not content.strip():
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'The comment message can not be empty!')
with DBSession(self.__Session) as session:
user = self.__get_username()
comment = session.query(Comment).get(comment_id)
if comment:
if comment.author != 'Anonymous' and comment.author != user:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
                        'Unauthorized comment modification!')
# Create system comment if the message is changed.
message = comment.message.decode('utf-8')
if message != content:
system_comment_msg = 'comment_changed {0} {1}'.format(
escape_whitespaces(message),
escape_whitespaces(content))
system_comment = \
self.__add_comment(comment.bug_hash,
system_comment_msg,
CommentKindValue.SYSTEM)
session.add(system_comment)
comment.message = content.encode('utf-8')
session.add(comment)
session.commit()
return True
else:
msg = 'Comment id ' + str(comment_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def removeComment(self, comment_id):
"""
        Remove the comment. We allow comments to be removed by their
        original author only, except for Anonymous comments that can be
        removed by anybody.
"""
self.__require_access()
user = self.__get_username()
with DBSession(self.__Session) as session:
comment = session.query(Comment).get(comment_id)
if comment:
if comment.author != 'Anonymous' and comment.author != user:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
                        'Unauthorized comment modification!')
session.delete(comment)
session.commit()
LOG.info("Comment '%s...' was removed from bug hash '%s' by "
"'%s'.", comment.message[:10], comment.bug_hash,
self.__get_username())
return True
else:
msg = 'Comment id ' + str(comment_id) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getCheckerDoc(self, checkerId):
"""
Parameters:
- checkerId
"""
missing_doc = "No documentation found for checker: " + checkerId + \
"\n\nPlease refer to the documentation at the "
if "." in checkerId:
sa_link = "http://clang-analyzer.llvm.org/available_checks.html"
missing_doc += "[ClangSA](" + sa_link + ")"
elif "-" in checkerId:
tidy_link = "http://clang.llvm.org/extra/clang-tidy/checks/" + \
checkerId + ".html"
missing_doc += "[ClangTidy](" + tidy_link + ")"
missing_doc += " homepage."
try:
md_file = self.__checker_doc_map.get(checkerId)
if md_file:
md_file = os.path.join(self.__checker_md_docs, md_file)
try:
with open(md_file, 'r',
encoding='utf-8',
errors='ignore') as md_content:
missing_doc = md_content.read()
except (IOError, OSError):
LOG.warning("Failed to read checker documentation: %s",
md_file)
return missing_doc
except Exception as ex:
msg = str(ex)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.IOERROR, msg)
@exc_to_thrift_reqfail
@timeit
def getSourceFileData(self, fileId, fileContent, encoding):
"""
Parameters:
- fileId
- fileContent
- enum Encoding
"""
self.__require_access()
with DBSession(self.__Session) as session:
sourcefile = session.query(File).get(fileId)
if sourcefile is None:
return SourceFileData()
if fileContent:
cont = session.query(FileContent).get(sourcefile.content_hash)
source = zlib.decompress(cont.content)
if encoding == Encoding.BASE64:
source = base64.b64encode(source)
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath,
fileContent=source.decode('utf-8'))
else:
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath)
@exc_to_thrift_reqfail
@timeit
def getLinesInSourceFileContents(self, lines_in_files_requested, encoding):
self.__require_access()
with DBSession(self.__Session) as session:
res = defaultdict(lambda: defaultdict(str))
for lines_in_file in lines_in_files_requested:
sourcefile = session.query(File).get(lines_in_file.fileId)
cont = session.query(FileContent).get(sourcefile.content_hash)
lines = zlib.decompress(
cont.content).decode('utf-8', 'ignore').split('\n')
for line in lines_in_file.lines:
content = '' if len(lines) < line else lines[line - 1]
if encoding == Encoding.BASE64:
content = convert.to_b64(content)
res[lines_in_file.fileId][line] = content
return res
@exc_to_thrift_reqfail
@timeit
def getCheckerCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = []
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.checker_id).label(
'checker_id'),
func.max(Report.severity).label(
'severity'),
Report.bug_id)
else:
q = session.query(Report.checker_id,
Report.severity,
func.count(Report.id))
q = apply_report_filter(q, filter_expression)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
unique_checker_q = session.query(q.c.checker_id,
func.max(q.c.severity),
func.count(q.c.bug_id)) \
.group_by(q.c.checker_id) \
.order_by(q.c.checker_id)
else:
unique_checker_q = q.group_by(Report.checker_id,
Report.severity) \
.order_by(Report.checker_id)
if limit:
unique_checker_q = unique_checker_q.limit(limit).offset(offset)
for name, severity, count in unique_checker_q:
checker_count = CheckerCount(name=name,
severity=severity,
count=count)
results.append(checker_count)
return results
@exc_to_thrift_reqfail
@timeit
def getAnalyzerNameCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = {}
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.analyzer_name).label(
'analyzer_name'),
Report.bug_id)
else:
q = session.query(Report.analyzer_name,
func.count(Report.id))
q = apply_report_filter(q, filter_expression)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
analyzer_name_q = session.query(q.c.analyzer_name,
func.count(q.c.bug_id)) \
.group_by(q.c.analyzer_name) \
.order_by(q.c.analyzer_name)
else:
analyzer_name_q = q.group_by(Report.analyzer_name) \
.order_by(Report.analyzer_name)
if limit:
analyzer_name_q = analyzer_name_q.limit(limit).offset(offset)
for name, count in analyzer_name_q:
results[name] = count
return results
@exc_to_thrift_reqfail
@timeit
def getSeverityCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.severity).label('severity'),
Report.bug_id)
else:
q = session.query(Report.severity,
func.count(Report.id))
q = apply_report_filter(q, filter_expression)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
severities = session.query(q.c.severity,
func.count(q.c.bug_id)) \
.group_by(q.c.severity)
else:
severities = q.group_by(Report.severity)
results = dict(severities)
return results
@exc_to_thrift_reqfail
@timeit
def getCheckerMsgCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = {}
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.checker_message).label(
'checker_message'),
Report.bug_id)
else:
q = session.query(Report.checker_message,
func.count(Report.id))
q = apply_report_filter(q, filter_expression)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
checker_messages = session.query(q.c.checker_message,
func.count(q.c.bug_id)) \
.group_by(q.c.checker_message) \
.order_by(q.c.checker_message)
else:
checker_messages = q.group_by(Report.checker_message) \
.order_by(Report.checker_message)
if limit:
checker_messages = checker_messages.limit(limit).offset(offset)
results = dict(checker_messages.all())
return results
@exc_to_thrift_reqfail
@timeit
def getReviewStatusCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = defaultdict(int)
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(Report.bug_id,
func.max(ReviewStatus.status).label(
'status'))
else:
q = session.query(func.max(Report.bug_id),
ReviewStatus.status,
func.count(Report.id))
q = apply_report_filter(q, filter_expression)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
review_statuses = session.query(func.max(q.c.bug_id),
q.c.status,
func.count(q.c.bug_id)) \
.group_by(q.c.status)
else:
review_statuses = q.group_by(ReviewStatus.status)
for _, rev_status, count in review_statuses:
if rev_status is None:
# If no review status is set count it as unreviewed.
rev_status = ttypes.ReviewStatus.UNREVIEWED
results[rev_status] += count
else:
rev_status = review_status_enum(rev_status)
results[rev_status] += count
return results
@exc_to_thrift_reqfail
@timeit
def getFileCounts(self, run_ids, report_filter, cmp_data, limit, offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = {}
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
stmt = session.query(Report.bug_id,
Report.file_id)
stmt = apply_report_filter(stmt, filter_expression)
if report_filter is not None and report_filter.isUnique:
stmt = stmt.group_by(Report.bug_id, Report.file_id)
stmt = stmt.subquery()
# When using pg8000, 1 cannot be passed as parameter to the count
# function. This is the reason why we have to convert it to
# Integer (see: https://github.com/mfenniak/pg8000/issues/110)
count_int = cast(1, sqlalchemy.Integer)
report_count = session.query(stmt.c.file_id,
func.count(count_int).label(
'report_count')) \
.group_by(stmt.c.file_id)
if limit:
report_count = report_count.limit(limit).offset(offset)
report_count = report_count.subquery()
file_paths = session.query(File.filepath,
report_count.c.report_count) \
.join(report_count,
report_count.c.file_id == File.id)
for fp, count in file_paths:
results[fp] = count
return results
@exc_to_thrift_reqfail
@timeit
def getRunHistoryTagCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
limit = verify_limit_range(limit)
results = []
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
tag_run_ids = session.query(RunHistory.run_id.distinct()) \
                .filter(RunHistory.version_tag.isnot(None))
if run_ids:
tag_run_ids = tag_run_ids.filter(
RunHistory.run_id.in_(run_ids))
tag_run_ids = tag_run_ids.subquery()
report_cnt_q = session.query(Report.run_id,
Report.bug_id,
Report.detected_at,
Report.fixed_at) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression) \
.filter(Report.run_id.in_(tag_run_ids)) \
.subquery()
is_unique = report_filter is not None and report_filter.isUnique
count_expr = func.count(report_cnt_q.c.bug_id if not is_unique
else report_cnt_q.c.bug_id.distinct())
count_q = session.query(RunHistory.id.label('run_history_id'),
count_expr.label('report_count')) \
.outerjoin(report_cnt_q,
report_cnt_q.c.run_id == RunHistory.run_id) \
.filter(RunHistory.version_tag.isnot(None)) \
.filter(get_open_reports_date_filter_query(report_cnt_q.c)) \
.group_by(RunHistory.id) \
.subquery()
tag_q = session.query(RunHistory.run_id.label('run_id'),
RunHistory.id.label('run_history_id')) \
.filter(RunHistory.version_tag.isnot(None))
if run_ids:
tag_q = tag_q.filter(RunHistory.run_id.in_(run_ids))
if report_filter and report_filter.runTag is not None:
tag_q = tag_q.filter(RunHistory.id.in_(report_filter.runTag))
tag_q = tag_q.subquery()
q = session.query(tag_q.c.run_history_id,
func.max(Run.id),
func.max(Run.name).label('run_name'),
func.max(RunHistory.id),
func.max(RunHistory.time),
func.max(RunHistory.version_tag),
func.max(count_q.c.report_count)) \
.outerjoin(RunHistory,
RunHistory.id == tag_q.c.run_history_id) \
.outerjoin(Run, Run.id == tag_q.c.run_id) \
.outerjoin(count_q,
count_q.c.run_history_id == RunHistory.id) \
.filter(RunHistory.version_tag.isnot(None)) \
.group_by(tag_q.c.run_history_id, RunHistory.time) \
.order_by(RunHistory.time.desc())
if limit:
q = q.limit(limit).offset(offset)
for _, run_id, run_name, tag_id, version_time, tag, count in q:
if tag:
results.append(RunTagCount(id=tag_id,
time=str(version_time),
name=tag,
runName=run_name,
runId=run_id,
count=count if count else 0))
return results
@exc_to_thrift_reqfail
@timeit
def getDetectionStatusCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, run_ids,
report_filter, cmp_data)
count_expr = func.count(literal_column('*'))
q = session.query(Report.detection_status,
count_expr)
q = apply_report_filter(q, filter_expression)
detection_stats = q.group_by(Report.detection_status).all()
results = dict(detection_stats)
results = {
detection_status_enum(k): v for k,
v in results.items()}
return results
# -----------------------------------------------------------------------
@timeit
def getPackageVersion(self):
return self.__package_version
# -----------------------------------------------------------------------
@exc_to_thrift_reqfail
@timeit
def removeRunResults(self, run_ids):
self.__require_store()
failed = False
for run_id in run_ids:
try:
self.removeRun(run_id, None)
except Exception as ex:
LOG.error("Failed to remove run: %s", run_id)
LOG.error(ex)
failed = True
return not failed
def __removeReports(self, session, report_ids, chunk_size=500):
"""
Removing reports in chunks.
"""
for r_ids in [report_ids[i:i + chunk_size] for
i in range(0, len(report_ids),
chunk_size)]:
session.query(Report) \
.filter(Report.id.in_(r_ids)) \
.delete(synchronize_session=False)
@exc_to_thrift_reqfail
@timeit
def removeRunReports(self, run_ids, report_filter, cmp_data):
self.__require_store()
if not run_ids:
run_ids = []
if cmp_data and cmp_data.runIds:
run_ids.extend(cmp_data.runIds)
with DBSession(self.__Session) as session:
check_remove_runs_lock(session, run_ids)
try:
filter_expression = process_report_filter(session, run_ids,
report_filter,
cmp_data)
q = session.query(Report.id) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
reports_to_delete = [r[0] for r in q]
if reports_to_delete:
self.__removeReports(session, reports_to_delete)
session.commit()
session.close()
LOG.info("The following reports were removed by '%s': %s",
self.__get_username(), reports_to_delete)
except Exception as ex:
session.rollback()
LOG.error("Database cleanup failed.")
LOG.error(ex)
return False
# Delete files and contents that are not present
# in any bug paths.
db_cleanup.remove_unused_files(self.__Session)
return True
@exc_to_thrift_reqfail
@timeit
def removeRun(self, run_id, run_filter):
self.__require_store()
# Remove the whole run.
with DBSession(self.__Session) as session:
check_remove_runs_lock(session, [run_id])
if not run_filter:
run_filter = RunFilter(ids=[run_id])
q = session.query(Run)
q = process_run_filter(session, q, run_filter)
q.delete(synchronize_session=False)
session.commit()
session.close()
runs = run_filter.names if run_filter.names else run_filter.ids
LOG.info("Runs '%s' were removed by '%s'.", runs,
self.__get_username())
# Delete files and contents that are not present
# in any bug paths.
db_cleanup.remove_unused_files(self.__Session)
return True
@exc_to_thrift_reqfail
@timeit
def updateRunData(self, run_id, new_run_name):
self.__require_store()
if not new_run_name:
msg = 'No new run name was given to update the run.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
with DBSession(self.__Session) as session:
check_new_run_name = session.query(Run) \
.filter(Run.name == new_run_name) \
.all()
if check_new_run_name:
msg = "New run name '" + new_run_name + "' already exists."
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
run_data = session.query(Run).get(run_id)
if run_data:
old_run_name = run_data.name
run_data.name = new_run_name
session.add(run_data)
session.commit()
LOG.info("Run name '%s' (%d) was changed to %s by '%s'.",
old_run_name, run_id, new_run_name,
self.__get_username())
return True
else:
msg = 'Run id ' + str(run_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
return True
@exc_to_thrift_reqfail
def getSuppressFile(self):
"""
        DEPRECATED: the server is no longer started with a suppress file.
        An empty string is returned.
"""
self.__require_access()
return ''
@exc_to_thrift_reqfail
@timeit
def addSourceComponent(self, name, value, description):
"""
        Adds a new source component if it does not exist or updates an
        existing one.
"""
self.__require_admin()
with DBSession(self.__Session) as session:
component = session.query(SourceComponent).get(name)
user = self.__auth_session.user if self.__auth_session else None
if component:
component.value = value.encode('utf-8')
component.description = description
component.user = user
else:
component = SourceComponent(name,
value.encode('utf-8'),
description,
user)
session.add(component)
session.commit()
return True
@exc_to_thrift_reqfail
@timeit
def getSourceComponents(self, component_filter):
"""
Returns the available source components.
"""
self.__require_access()
with DBSession(self.__Session) as session:
q = session.query(SourceComponent)
            if component_filter:
sql_component_filter = [SourceComponent.name.ilike(conv(cf))
for cf in component_filter]
q = q.filter(*sql_component_filter)
q = q.order_by(SourceComponent.name)
            return [SourceComponentData(c.name,
                                        c.value.decode('utf-8'),
                                        c.description) for c in q]
@exc_to_thrift_reqfail
@timeit
def removeSourceComponent(self, name):
"""
Removes a source component.
"""
self.__require_admin()
with DBSession(self.__Session) as session:
component = session.query(SourceComponent).get(name)
if component:
session.delete(component)
session.commit()
LOG.info("Source component '%s' has been removed by '%s'",
name, self.__get_username())
return True
else:
msg = 'Source component ' + str(name) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getMissingContentHashes(self, file_hashes):
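        """
        Return the subset of the given content hashes which are not yet
        stored in the database.
        """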
self.__require_store()
if not file_hashes:
return []
with DBSession(self.__Session) as session:
q = session.query(FileContent) \
.options(sqlalchemy.orm.load_only('content_hash')) \
.filter(FileContent.content_hash.in_(file_hashes))
return list(set(file_hashes) -
set([fc.content_hash for fc in q]))
def __store_source_files(self, source_root, filename_to_hash,
trim_path_prefixes):
"""
Storing file contents from plist.
"""
file_path_to_id = {}
for file_name, file_hash in filename_to_hash.items():
source_file_name = os.path.join(source_root,
file_name.strip("/"))
source_file_name = os.path.realpath(source_file_name)
LOG.debug("Storing source file: %s", source_file_name)
trimmed_file_path = util.trim_path_prefixes(file_name,
trim_path_prefixes)
if not os.path.isfile(source_file_name):
# The file was not in the ZIP file, because we already
# have the content. Let's check if we already have a file
# record in the database or we need to add one.
                LOG.debug("%s not found or already stored.", file_name)
with DBSession(self.__Session) as session:
fid = store_handler.addFileRecord(session,
trimmed_file_path,
file_hash)
if not fid:
LOG.error("File ID for %s is not found in the DB with "
"content hash %s. Missing from ZIP?",
source_file_name, file_hash)
file_path_to_id[file_name] = fid
LOG.debug("%d fileid found", fid)
continue
with DBSession(self.__Session) as session:
file_path_to_id[file_name] = \
store_handler.addFileContent(session,
trimmed_file_path,
source_file_name,
file_hash,
None)
return file_path_to_id
def __store_reports(self, session, report_dir, source_root, run_id,
file_path_to_id, run_history_time, severity_map,
wrong_src_code_comments, skip_handler,
checkers):
"""
        Parse and store the plist report files.
"""
all_reports = session.query(Report) \
.filter(Report.run_id == run_id) \
.all()
hash_map_reports = defaultdict(list)
for report in all_reports:
hash_map_reports[report.bug_id].append(report)
already_added = set()
new_bug_hashes = set()
        # Get checker names which were enabled during the analysis.
enabled_checkers = set()
disabled_checkers = set()
checker_to_analyzer = dict()
for analyzer_name, analyzer_checkers in checkers.items():
if isinstance(analyzer_checkers, dict):
for checker_name, enabled in analyzer_checkers.items():
checker_to_analyzer[checker_name] = analyzer_name
if enabled:
enabled_checkers.add(checker_name)
else:
disabled_checkers.add(checker_name)
else:
enabled_checkers.update(analyzer_checkers)
for checker_name in analyzer_checkers:
checker_to_analyzer[checker_name] = analyzer_name
def checker_is_unavailable(checker_name):
"""
Returns True if the given checker is unavailable.
We filter out checkers which start with 'clang-diagnostic-' because
these are warnings and the warning list is not available right now.
FIXME: using the 'diagtool' could be a solution later so the
client can send the warning list to the server.
"""
return not checker_name.startswith('clang-diagnostic-') and \
enabled_checkers and checker_name not in enabled_checkers
def get_analyzer_name(report):
""" Get analyzer name for the given report. """
analyzer_name = checker_to_analyzer.get(report.check_name)
if analyzer_name:
return analyzer_name
if report.metadata:
return report.metadata.get("analyzer", {}).get("name")
# Processing PList files.
_, _, report_files = next(os.walk(report_dir), ([], [], []))
all_report_checkers = set()
for f in report_files:
if not f.endswith('.plist'):
continue
LOG.debug("Parsing input file '%s'", f)
try:
files, reports = plist_parser.parse_plist_file(
os.path.join(report_dir, f), source_root)
except Exception as ex:
LOG.error('Parsing the plist failed: %s', str(ex))
continue
file_ids = {}
if reports:
missing_ids_for_files = []
for file_name in files:
file_id = file_path_to_id.get(file_name, -1)
if file_id == -1:
missing_ids_for_files.append(file_name)
continue
file_ids[file_name] = file_id
if missing_ids_for_files:
LOG.error("Failed to get file path id for '%s'!",
file_name)
continue
# Store report.
for report in reports:
checker_name = report.main['check_name']
all_report_checkers.add(checker_name)
source_file = files[report.main['location']['file']]
if skip_handler.should_skip(source_file):
continue
bug_paths, bug_events, bug_extended_data = \
store_handler.collect_paths_events(report, file_ids,
files)
report_path_hash = get_report_path_hash(report)
if report_path_hash in already_added:
LOG.debug('Not storing report. Already added')
LOG.debug(report)
continue
LOG.debug("Storing check results to the database.")
LOG.debug("Storing report")
bug_id = report.main[
'issue_hash_content_of_line_in_context']
detection_status = 'new'
detected_at = run_history_time
if bug_id in hash_map_reports:
old_report = hash_map_reports[bug_id][0]
old_status = old_report.detection_status
detection_status = 'reopened' \
if old_status == 'resolved' else 'unresolved'
detected_at = old_report.detected_at
analyzer_name = get_analyzer_name(report)
report_id = store_handler.addReport(
session,
run_id,
file_ids[source_file],
report.main,
bug_paths,
bug_events,
bug_extended_data,
detection_status,
detected_at,
severity_map,
analyzer_name)
new_bug_hashes.add(bug_id)
already_added.add(report_path_hash)
last_report_event = report.bug_path[-1]
file_name = files[last_report_event['location']['file']]
source_file_name = os.path.realpath(
os.path.join(source_root, file_name.strip("/")))
if os.path.isfile(source_file_name):
report_line = last_report_event['location']['line']
source_file = os.path.basename(file_name)
src_comment_data = \
parse_codechecker_review_comment(source_file_name,
report_line,
checker_name)
if len(src_comment_data) == 1:
status = src_comment_data[0]['status']
rw_status = ttypes.ReviewStatus.FALSE_POSITIVE
if status == 'confirmed':
rw_status = ttypes.ReviewStatus.CONFIRMED
elif status == 'intentional':
rw_status = ttypes.ReviewStatus.INTENTIONAL
self._setReviewStatus(report_id,
rw_status,
src_comment_data[0]['message'],
session)
elif len(src_comment_data) > 1:
LOG.warning(
"Multiple source code comment can be found "
"for '%s' checker in '%s' at line %s. "
"This bug will not be suppressed!",
checker_name, source_file, report_line)
wrong_src_code = "{0}|{1}|{2}".format(source_file,
report_line,
checker_name)
wrong_src_code_comments.append(wrong_src_code)
LOG.debug("Storing done for report %d", report_id)
        # If a checker was found in a plist file it cannot have been disabled,
        # so we remove these checkers from the disabled checker list and add
        # them to the enabled checker list.
disabled_checkers -= all_report_checkers
enabled_checkers |= all_report_checkers
reports_to_delete = set()
for bug_hash, reports in hash_map_reports.items():
if bug_hash in new_bug_hashes:
reports_to_delete.update([x.id for x in reports])
else:
for report in reports:
# We set the fix date of a report only if the report
# has not been fixed before.
if report.fixed_at:
continue
checker = report.checker_id
if checker in disabled_checkers:
report.detection_status = 'off'
elif checker_is_unavailable(checker):
report.detection_status = 'unavailable'
else:
report.detection_status = 'resolved'
report.fixed_at = run_history_time
if reports_to_delete:
self.__removeReports(session, list(reports_to_delete))
@staticmethod
@exc_to_thrift_reqfail
def __store_run_lock(session, name, username):
"""
Store a RunLock record for the given run name into the database.
"""
try:
# If the run can be stored, we need to lock it first. If there is
# already a lock in the database for the given run name which is
# expired and multiple processes are trying to get this entry from
# the database for update we may get the following exception:
# could not obtain lock on row in relation "run_locks"
# This is the reason why we have to wrap this query to a try/except
# block.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one_or_none()
except (sqlalchemy.exc.OperationalError,
sqlalchemy.exc.ProgrammingError) as ex:
LOG.error("Failed to get run lock for '%s': %s", name, ex)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"Someone is already storing to the same run. Please wait "
"while the other storage is finished and try it again.")
if not run_lock:
# If there is no lock record for the given run name, the run
# is not locked -- create a new lock.
run_lock = RunLock(name, username)
session.add(run_lock)
elif run_lock.has_expired(
db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE):
# There can be a lock in the database, which has already
# expired. In this case, we assume that the previous operation
# has failed, and thus, we can re-use the already present lock.
run_lock.touch()
run_lock.username = username
else:
# In case the lock exists and it has not expired, we must
# consider the run a locked one.
when = run_lock.when_expires(
db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE)
username = run_lock.username if run_lock.username is not None \
else "another user"
LOG.info("Refusing to store into run '%s' as it is locked by "
"%s. Lock will expire at '%s'.", name, username, when)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"The run named '{0}' is being stored into by {1}. If the "
"other store operation has failed, this lock will expire "
"at '{2}'.".format(name, username, when))
# At any rate, if the lock has been created or updated, commit it
# into the database.
try:
session.commit()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.orm.exc.StaleDataError):
# The commit of this lock can fail.
#
# In case two store ops attempt to lock the same run name at the
# same time, committing the lock in the transaction that commits
# later will result in an IntegrityError due to the primary key
# constraint.
#
# In case two store ops attempt to lock the same run name with
            # reuse and one of the operations hangs long enough before COMMIT
# so that the other operation commits and thus removes the lock
# record, StaleDataError is raised. In this case, also consider
# the run locked, as the data changed while the transaction was
# waiting, as another run wholly completed.
LOG.info("Run '%s' got locked while current transaction "
"tried to acquire a lock. Considering run as locked.",
name)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"The run named '{0}' is being stored into by another "
"user.".format(name))
@staticmethod
@exc_to_thrift_reqfail
def __free_run_lock(session, name):
"""
Remove the lock from the database for the given run name.
"""
# Using with_for_update() here so the database (in case it supports
# this operation) locks the lock record's row from any other access.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one()
session.delete(run_lock)
session.commit()
def __check_run_limit(self, run_name):
"""
        Checks the maximum allowed number of uploadable runs for the
        current product.
"""
max_run_count = self.__manager.get_max_run_count()
with DBSession(self.__config_database) as session:
product = session.query(Product).get(self.__product.id)
if product.run_limit:
max_run_count = product.run_limit
# Session that handles constraints on the run.
with DBSession(self.__Session) as session:
if max_run_count:
LOG.debug("Check the maximum number of allowed "
"runs which is %d", max_run_count)
run = session.query(Run) \
.filter(Run.name == run_name) \
.one_or_none()
# If max_run_count is not set in the config file, it will allow
# the user to upload unlimited runs.
run_count = session.query(Run.id).count()
            # If we are not updating an existing run and the run count has
            # reached the limit, an exception is thrown.
if not run and run_count >= max_run_count:
remove_run_count = run_count - max_run_count + 1
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'You reached the maximum number of allowed runs '
'({0}/{1})! Please remove at least {2} run(s) before '
'you try it again.'.format(run_count,
max_run_count,
remove_run_count))
@exc_to_thrift_reqfail
@timeit
def massStoreRun(self, name, tag, version, b64zip, force,
trim_path_prefixes, description):
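        """
        Store the analysis results contained in the given base64 encoded
        zip under the given run name.
        """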
self.__require_store()
start_time = time.time()
user = self.__auth_session.user if self.__auth_session else None
# Check constraints of the run.
self.__check_run_limit(name)
with DBSession(self.__Session) as session:
ThriftRequestHandler.__store_run_lock(session, name, user)
wrong_src_code_comments = []
try:
with TemporaryDirectory() as zip_dir:
zip_size = unzip(b64zip, zip_dir)
LOG.debug("Using unzipped folder '%s'", zip_dir)
source_root = os.path.join(zip_dir, 'root')
report_dir = os.path.join(zip_dir, 'reports')
metadata_file = os.path.join(report_dir, 'metadata.json')
skip_file = os.path.join(report_dir, 'skip_file')
content_hash_file = os.path.join(zip_dir,
'content_hashes.json')
skip_handler = skiplist_handler.SkipListHandler()
if os.path.exists(skip_file):
LOG.debug("Pocessing skip file %s", skip_file)
try:
with open(skip_file,
encoding="utf-8",
errors="ignore") as sf:
skip_handler = \
skiplist_handler.SkipListHandler(sf.read())
except (IOError, OSError) as err:
LOG.error("Failed to open skip file")
LOG.error(err)
filename_to_hash = util.load_json_or_empty(content_hash_file,
{})
file_path_to_id = self.__store_source_files(source_root,
filename_to_hash,
trim_path_prefixes)
run_history_time = datetime.now()
metadata_parser = MetadataInfoParser()
check_commands, check_durations, cc_version, statistics, \
checkers = metadata_parser.get_metadata_info(metadata_file)
command = ''
if len(check_commands) == 1:
command = list(check_commands)[0]
elif len(check_commands) > 1:
command = "multiple analyze calls: " + \
'; '.join(check_commands)
durations = 0
if check_durations:
# Round the duration to seconds.
durations = int(sum(check_durations))
                # When we use multiple server instances and we try to run
                # multiple store operations to them, each containing reports
                # with the same report hash and with source code comments,
                # it is possible that the following exception will be
# thrown: (psycopg2.extensions.TransactionRollbackError)
# deadlock detected.
# The problem is that the report hash is the key for the
# review data table and both of the store actions try to
# update the same review data row.
# Neither of the two processes can continue, and they will wait
# for each other indefinitely. PostgreSQL in this case will
# terminate one transaction with the above exception.
# For this reason in case of failure we will wait some seconds
# and try to run the storage again.
# For more information see #2655 and #2653 issues on github.
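                # With the values below this means: wait 60 seconds after the
                # first failure, 120 seconds after the second, and give up with
                # a RequestFailed error on the third failed attempt.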
max_num_of_tries = 3
num_of_tries = 0
sec_to_wait_after_failure = 60
while True:
try:
# This session's transaction buffer stores the actual
# run data into the database.
with DBSession(self.__Session) as session:
# Load the lock record for "FOR UPDATE" so that the
# transaction that handles the run's store
# operations has a lock on the database row itself.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one()
                            # Do not remove this seemingly pointless log line:
                            # it makes sure the SQL statement above is actually
                            # executed (not optimised away) and that the
                            # fetched row is not garbage collected.
LOG.debug("Storing into run '%s' locked at '%s'.",
name, run_lock.locked_at)
# Actual store operation begins here.
user_name = self.__get_username()
run_id = \
store_handler.addCheckerRun(session,
command,
name,
tag,
user_name,
run_history_time,
version,
force,
cc_version,
statistics,
description)
self.__store_reports(session,
report_dir,
source_root,
run_id,
file_path_to_id,
run_history_time,
self.__context.severity_map,
wrong_src_code_comments,
skip_handler,
checkers)
store_handler.setRunDuration(session,
run_id,
durations)
store_handler.finishCheckerRun(session, run_id)
session.commit()
LOG.info("'%s' stored results (%s KB) to run '%s' "
"in %s seconds.", user_name,
round(zip_size / 1024), name,
round(time.time() - start_time, 2))
return run_id
except (sqlalchemy.exc.OperationalError,
sqlalchemy.exc.ProgrammingError) as ex:
num_of_tries += 1
if num_of_tries == max_num_of_tries:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.
ErrorCode.DATABASE,
"Storing reports to the database failed: "
"{0}".format(ex))
LOG.error("Storing reports of '%s' run failed: "
"%s.\nWaiting %d sec before trying to store "
"it again!", name, ex,
sec_to_wait_after_failure)
time.sleep(sec_to_wait_after_failure)
sec_to_wait_after_failure *= 2
except Exception as ex:
LOG.error("Failed to store results: %s", ex)
import traceback
traceback.print_exc()
raise
finally:
# In any case if the "try" block's execution began, a run lock must
# exist, which can now be removed, as storage either completed
# successfully, or failed in a detectable manner.
# (If the failure is undetectable, the coded grace period expiry
# of the lock will allow further store operations to the given
# run name.)
with DBSession(self.__Session) as session:
ThriftRequestHandler.__free_run_lock(session, name)
if wrong_src_code_comments:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.SOURCE_FILE,
"Multiple source code comment can be found with the same "
"checker name for same bug!",
wrong_src_code_comments)
@exc_to_thrift_reqfail
@timeit
def allowsStoringAnalysisStatistics(self):
self.__require_store()
        return bool(self.__manager.get_analysis_statistics_dir())
@exc_to_thrift_reqfail
@timeit
def getAnalysisStatisticsLimits(self):
self.__require_store()
cfg = dict()
# Get the limit of failure zip size.
failure_zip_size = self.__manager.get_failure_zip_size()
if failure_zip_size:
cfg[ttypes.StoreLimitKind.FAILURE_ZIP_SIZE] = failure_zip_size
# Get the limit of compilation database size.
compilation_database_size = \
self.__manager.get_compilation_database_size()
if compilation_database_size:
cfg[ttypes.StoreLimitKind.COMPILATION_DATABASE_SIZE] = \
compilation_database_size
return cfg
@exc_to_thrift_reqfail
@timeit
def storeAnalysisStatistics(self, run_name, b64zip):
self.__require_store()
report_dir_store = self.__manager.get_analysis_statistics_dir()
if report_dir_store:
try:
product_dir = os.path.join(report_dir_store,
self.__product.endpoint)
# Create report store directory.
if not os.path.exists(product_dir):
os.makedirs(product_dir)
# Removes and replaces special characters in the run name.
run_name = slugify(run_name)
run_zip_file = os.path.join(product_dir, run_name + '.zip')
with open(run_zip_file, 'wb') as run_zip:
run_zip.write(zlib.decompress(
base64.b64decode(b64zip.encode('utf-8'))))
return True
except Exception as ex:
LOG.error(str(ex))
return False
return False
@exc_to_thrift_reqfail
@timeit
def getAnalysisStatistics(self, run_id, run_history_id):
self.__require_access()
analyzer_statistics = {}
with DBSession(self.__Session) as session:
query = session.query(AnalyzerStatistic,
Run.id)
if run_id:
query = query.filter(Run.id == run_id)
elif run_history_id:
query = query.filter(RunHistory.id == run_history_id)
query = query \
.outerjoin(RunHistory,
RunHistory.id == AnalyzerStatistic.run_history_id) \
.outerjoin(Run,
Run.id == RunHistory.run_id)
for stat, run_id in query:
failed_files = zlib.decompress(stat.failed_files).decode(
'utf-8').split('\n') if stat.failed_files else None
analyzer_version = zlib.decompress(
stat.version).decode('utf-8') if stat.version else None
analyzer_statistics[stat.analyzer_type] = \
ttypes.AnalyzerStatistics(version=analyzer_version,
failed=stat.failed,
failedFilePaths=failed_files,
successful=stat.successful)
return analyzer_statistics
 | 1 | 12,551 | Should we add some default case if none of the above returns an analyzer name? Something like `unknown analyzer`? | Ericsson-codechecker | c |
@@ -115,7 +115,7 @@ module Faker
keywords << :special_characters if legacy_special_characters != NOT_GIVEN
end
- min_alpha = mix_case ? 2 : 0
+ min_alpha = mix_case && min_length > 1 ? 2 : 0
temp = Lorem.characters(number: min_length, min_alpha: min_alpha)
diff_length = max_length - min_length
| 1 | # frozen_string_literal: true
module Faker
class Internet < Base
class << self
def email(legacy_name = NOT_GIVEN, legacy_separators = NOT_GIVEN, name: nil, separators: nil, domain: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :name if legacy_name != NOT_GIVEN
keywords << :separators if legacy_separators != NOT_GIVEN
end
local_part = if separators
username(specifier: name, separators: separators)
else
username(specifier: name)
end
sanitized_local_part = sanitize_email_local_part(local_part)
construct_email(sanitized_local_part, domain_name(domain: domain))
end
def free_email(legacy_name = NOT_GIVEN, name: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :name if legacy_name != NOT_GIVEN
end
construct_email(
sanitize_email_local_part(username(specifier: name)),
fetch('internet.free_email')
)
end
def safe_email(legacy_name = NOT_GIVEN, name: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :name if legacy_name != NOT_GIVEN
end
construct_email(
sanitize_email_local_part(username(specifier: name)),
'example.' + sample(%w[org com net])
)
end
def username(legacy_specifier = NOT_GIVEN, legacy_separators = NOT_GIVEN, specifier: nil, separators: %w[. _])
warn_for_deprecated_arguments do |keywords|
keywords << :specifier if legacy_specifier != NOT_GIVEN
keywords << :separators if legacy_separators != NOT_GIVEN
end
with_locale(:en) do
return shuffle(specifier.scan(/[[:word:]]+/)).join(sample(separators)).downcase if specifier.respond_to?(:scan)
if specifier.is_a?(Integer)
            # If specifier is an Integer with a large value, an ArgumentError
            # is raised to avoid running out of memory.
raise ArgumentError, 'Given argument is too large' if specifier > 10**6
tries = 0 # Don't try forever in case we get something like 1_000_000.
result = nil
loop do
result = username(specifier: nil, separators: separators)
tries += 1
break unless result.length < specifier && tries < 7
end
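            # Repeat the generated username as many times as needed so the
            # result is at least `specifier` characters long.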
return result * (specifier / result.length + 1) if specifier.positive?
elsif specifier.is_a?(Range)
tries = 0
result = nil
loop do
result = username(specifier: specifier.min, separators: separators)
tries += 1
break unless !specifier.include?(result.length) && tries < 7
end
return result[0...specifier.max]
end
sample([
Char.prepare(Name.first_name),
[Name.first_name, Name.last_name].map do |name|
Char.prepare(name)
end.join(sample(separators))
])
end
end
# rubocop:disable Metrics/ParameterLists
##
# Produces a randomized string of characters suitable for passwords
#
# @param min_length [Integer] The minimum length of the password
# @param max_length [Integer] The maximum length of the password
# @param mix_case [Boolean] Toggles if uppercased letters are allowed. If true, at least one will be added.
# @param special_characters [Boolean] Toggles if special characters are allowed. If true, at least one will be added.
#
# @return [String]
#
# @example
# Faker::Internet.password #=> "Vg5mSvY1UeRg7"
# @example
# Faker::Internet.password(min_length: 8) #=> "YfGjIk0hGzDqS0"
# @example
# Faker::Internet.password(min_length: 10, max_length: 20) #=> "EoC9ShWd1hWq4vBgFw"
# @example
# Faker::Internet.password(min_length: 10, max_length: 20, mix_case: true) #=> "3k5qS15aNmG"
# @example
# Faker::Internet.password(min_length: 10, max_length: 20, mix_case: true, special_characters: true) #=> "*%NkOnJsH4"
#
# @faker.version 2.1.3
def password(legacy_min_length = NOT_GIVEN, legacy_max_length = NOT_GIVEN, legacy_mix_case = NOT_GIVEN, legacy_special_characters = NOT_GIVEN, min_length: 8, max_length: 16, mix_case: true, special_characters: false)
# rubocop:enable Metrics/ParameterLists
warn_for_deprecated_arguments do |keywords|
keywords << :min_length if legacy_min_length != NOT_GIVEN
keywords << :max_length if legacy_max_length != NOT_GIVEN
keywords << :mix_case if legacy_mix_case != NOT_GIVEN
keywords << :special_characters if legacy_special_characters != NOT_GIVEN
end
min_alpha = mix_case ? 2 : 0
temp = Lorem.characters(number: min_length, min_alpha: min_alpha)
diff_length = max_length - min_length
if diff_length.positive?
diff_rand = rand(diff_length + 1)
temp += Lorem.characters(number: diff_rand)
end
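        # Upcase every other alphabetic character so the generated password
        # mixes lower and upper case.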
if mix_case
alpha_count = 0
temp.chars.each_with_index do |char, index|
if char =~ /[[:alpha:]]/
temp[index] = char.upcase if alpha_count.even?
alpha_count += 1
end
end
end
if special_characters
chars = %w[! @ # $ % ^ & *]
rand(1..min_length).times do |i|
temp[i] = chars[rand(chars.length)]
end
end
temp
end
def domain_name(legacy_subdomain = NOT_GIVEN, subdomain: false, domain: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :subdomain if legacy_subdomain != NOT_GIVEN
end
with_locale(:en) do
if domain
domain
.split('.')
.map { |domain_part| Char.prepare(domain_part) }
.tap do |domain_elements|
domain_elements << domain_suffix if domain_elements.length < 2
domain_elements.unshift(Char.prepare(domain_word)) if subdomain && domain_elements.length < 3
end.join('.')
else
[domain_word, domain_suffix].tap do |domain_elements|
domain_elements.unshift(Char.prepare(domain_word)) if subdomain
end.join('.')
end
end
end
def fix_umlauts(legacy_string = NOT_GIVEN, string: '')
warn_for_deprecated_arguments do |keywords|
keywords << :string if legacy_string != NOT_GIVEN
end
Char.fix_umlauts(string)
end
def domain_word
with_locale(:en) { Char.prepare(Company.name.split(' ').first) }
end
def domain_suffix
fetch('internet.domain_suffix')
end
def mac_address(legacy_prefix = NOT_GIVEN, prefix: '')
warn_for_deprecated_arguments do |keywords|
keywords << :prefix if legacy_prefix != NOT_GIVEN
end
prefix_digits = prefix.split(':').map { |d| d.to_i(16) }
address_digits = Array.new((6 - prefix_digits.size)) { rand(256) }
(prefix_digits + address_digits).map { |d| format('%02x', d) }.join(':')
end
def ip_v4_address
[rand_in_range(0, 255), rand_in_range(0, 255),
rand_in_range(0, 255), rand_in_range(0, 255)].join('.')
end
def private_ip_v4_address
addr = nil
loop do
addr = ip_v4_address
break if private_net_checker[addr]
end
addr
end
def public_ip_v4_address
addr = nil
loop do
addr = ip_v4_address
break unless reserved_net_checker[addr]
end
addr
end
def private_nets_regex
[
/^10\./, # 10.0.0.0 - 10.255.255.255
/^100\.(6[4-9]|[7-9]\d|1[0-1]\d|12[0-7])\./, # 100.64.0.0 - 100.127.255.255
/^127\./, # 127.0.0.0 - 127.255.255.255
/^169\.254\./, # 169.254.0.0 - 169.254.255.255
/^172\.(1[6-9]|2\d|3[0-1])\./, # 172.16.0.0 - 172.31.255.255
/^192\.0\.0\./, # 192.0.0.0 - 192.0.0.255
/^192\.168\./, # 192.168.0.0 - 192.168.255.255
/^198\.(1[8-9])\./ # 198.18.0.0 - 198.19.255.255
]
end
def private_net_checker
->(addr) { private_nets_regex.any? { |net| net =~ addr } }
end
def reserved_nets_regex
[
/^0\./, # 0.0.0.0 - 0.255.255.255
/^192\.0\.2\./, # 192.0.2.0 - 192.0.2.255
/^192\.88\.99\./, # 192.88.99.0 - 192.88.99.255
/^198\.51\.100\./, # 198.51.100.0 - 198.51.100.255
/^203\.0\.113\./, # 203.0.113.0 - 203.0.113.255
/^(22[4-9]|23\d)\./, # 224.0.0.0 - 239.255.255.255
/^(24\d|25[0-5])\./ # 240.0.0.0 - 255.255.255.254 and 255.255.255.255
]
end
def reserved_net_checker
->(addr) { (private_nets_regex + reserved_nets_regex).any? { |net| net =~ addr } }
end
def ip_v4_cidr
"#{ip_v4_address}/#{rand(1..31)}"
end
def ip_v6_address
(1..8).map { rand(65_536).to_s(16) }.join(':')
end
def ip_v6_cidr
"#{ip_v6_address}/#{rand(1..127)}"
end
# rubocop:disable Metrics/ParameterLists
def url(legacy_host = NOT_GIVEN, legacy_path = NOT_GIVEN, legacy_scheme = NOT_GIVEN, host: domain_name, path: "/#{username}", scheme: 'http')
# rubocop:enable Metrics/ParameterLists
warn_for_deprecated_arguments do |keywords|
keywords << :host if legacy_host != NOT_GIVEN
keywords << :path if legacy_path != NOT_GIVEN
keywords << :scheme if legacy_scheme != NOT_GIVEN
end
"#{scheme}://#{host}#{path}"
end
def slug(legacy_words = NOT_GIVEN, legacy_glue = NOT_GIVEN, words: nil, glue: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :words if legacy_words != NOT_GIVEN
keywords << :glue if legacy_glue != NOT_GIVEN
end
glue ||= sample(%w[- _])
(words || Faker::Lorem.words(number: 2).join(' ')).delete(',.').gsub(' ', glue).downcase
end
def device_token
shuffle(rand(16**64).to_s(16).rjust(64, '0').chars.to_a).join
end
def user_agent(legacy_vendor = NOT_GIVEN, vendor: nil)
warn_for_deprecated_arguments do |keywords|
keywords << :vendor if legacy_vendor != NOT_GIVEN
end
agent_hash = translate('faker.internet.user_agent')
agents = vendor.respond_to?(:to_sym) && agent_hash[vendor.to_sym] || agent_hash[sample(agent_hash.keys)]
sample(agents)
end
def uuid
# borrowed from: https://github.com/ruby/ruby/blob/d48783bb0236db505fe1205d1d9822309de53a36/lib/securerandom.rb#L250
ary = Faker::Config.random.bytes(16).unpack('NnnnnN')
ary[2] = (ary[2] & 0x0fff) | 0x4000
ary[3] = (ary[3] & 0x3fff) | 0x8000
'%08x-%04x-%04x-%04x-%04x%08x' % ary # rubocop:disable Style/FormatString
end
##
      # Produces a random Base64-like string of characters (letters, digits, and symbols).
#
# @param length [Integer] The length of the string to generate
      # @param padding [Boolean] Toggles whether a trailing '=' padding character will be added.
# @param urlsafe [Boolean] Toggles charset to '-' and '_' instead of '+' and '/'.
#
# @return [String]
#
# @example
# Faker::Internet.base64
# #=> "r_hbZ2DSD-ZACzZT"
# @example
# Faker::Internet.base64(length: 4, padding: true, urlsafe: false)
# #=> "x1/R="
#
# @faker.version 2.11.0
def base64(length: 16, padding: false, urlsafe: true)
char_range = [
Array('0'..'9'),
Array('A'..'Z'),
Array('a'..'z'),
urlsafe ? %w[- _] : %w[+ /]
].flatten
s = Array.new(length) { sample(char_range) }.join
s += '=' if padding
s
end
alias user_name username
private
def sanitize_email_local_part(local_part)
char_range = [
Array('0'..'9'),
Array('A'..'Z'),
Array('a'..'z'),
"!#$%&'*+-/=?^_`{|}~.".split(//)
].flatten
local_part.split(//).map do |char|
char_range.include?(char) ? char : '#'
end.join
end
def construct_email(local_part, domain_name)
[local_part, domain_name].join('@')
end
end
end
end
| 1 | 10,137 | Nvm. The original version is best... *hides from the angry rubocop* | faker-ruby-faker | rb |
@@ -100,14 +100,13 @@ func (c *client) Call(
return nil, err
}
- body, cleanup, err := marshal(req.Encoding, protoReq)
+ body, err := marshal(req.Encoding, protoReq)
if err != nil {
return nil, yarpcencoding.RequestBodyEncodeError(req, err)
}
- defer cleanup()
reqBuf := &yarpc.Buffer{}
- if _, err := reqBuf.Write(body); err != nil {
+ if _, err := reqBuf.Write(body.Bytes()); err != nil {
return nil, err
}
| 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpcprotobuf
import (
"context"
"github.com/gogo/protobuf/proto"
yarpc "go.uber.org/yarpc/v2"
"go.uber.org/yarpc/v2/yarpcencoding"
"go.uber.org/yarpc/v2/yarpcprocedure"
)
// Client is a protobuf client.
type Client interface {
Call(
ctx context.Context,
method string,
protoReq proto.Message,
protoRes proto.Message,
opts ...yarpc.CallOption,
) (proto.Message, error)
}
// StreamClient is a protobuf client with streaming.
type StreamClient interface {
Client
CallStream(ctx context.Context, method string, opts ...yarpc.CallOption) (*ClientStream, error)
}
type client struct {
c yarpc.Client
encoding yarpc.Encoding
protoService string
}
// NewClient creates a new client.
func NewClient(c yarpc.Client, protoService string, opts ...ClientOption) Client {
return newClient(c, protoService, opts...)
}
// NewStreamClient creates a new stream client.
func NewStreamClient(c yarpc.Client, protoService string, opts ...ClientOption) StreamClient {
return newClient(c, protoService, opts...)
}
func newClient(c yarpc.Client, service string, opts ...ClientOption) *client {
cli := &client{c: c, encoding: Encoding, protoService: service}
for _, o := range opts {
o.apply(cli)
}
return cli
}
func (c *client) CallStream(ctx context.Context, method string, opts ...yarpc.CallOption) (*ClientStream, error) {
call, err := yarpc.NewStreamOutboundCall(opts...)
if err != nil {
return nil, err
}
ctx, req, err := c.toRequest(ctx, method, call)
if err != nil {
return nil, err
}
stream, err := c.c.Stream.CallStream(ctx, req)
if err != nil {
return nil, err
}
return &ClientStream{stream: stream}, nil
}
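// Call issues a unary request for the given method: protoReq is encoded as
// the request body, the response body is decoded into protoRes, and protoRes
// is returned along with any application error.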
func (c *client) Call(
ctx context.Context,
method string,
protoReq proto.Message,
protoRes proto.Message,
opts ...yarpc.CallOption,
) (proto.Message, error) {
call := yarpc.NewOutboundCall(opts...)
ctx, req, err := c.toRequest(ctx, method, call)
if err != nil {
return nil, err
}
body, cleanup, err := marshal(req.Encoding, protoReq)
if err != nil {
return nil, yarpcencoding.RequestBodyEncodeError(req, err)
}
defer cleanup()
reqBuf := &yarpc.Buffer{}
if _, err := reqBuf.Write(body); err != nil {
return nil, err
}
res, resBuf, appErr := c.c.Unary.Call(ctx, req, reqBuf)
if res == nil {
return nil, appErr
}
if _, err := call.ReadFromResponse(ctx, res); err != nil {
return nil, err
}
if resBuf != nil {
if err := unmarshal(req.Encoding, resBuf, protoRes); err != nil {
return nil, yarpcencoding.ResponseBodyDecodeError(req, err)
}
}
return protoRes, appErr
}
// toRequest maps the outbound call to its corresponding request.
// Note that the procedure name is derived from the proto service's
// fully-qualified name, combined with the specific method we are
// calling.
//
// Given a "Store" service declared in the "keyvalue" package, the derived
// procedure for the "Get" method would be "keyvalue.Store::Get".
func (c *client) toRequest(ctx context.Context, method string, call *yarpc.OutboundCall) (context.Context, *yarpc.Request, error) {
req := &yarpc.Request{
Caller: c.c.Caller,
Service: c.c.Service,
Procedure: yarpcprocedure.ToName(c.protoService, method),
Encoding: c.encoding,
}
ctx, err := call.WriteToRequest(ctx, req)
if err != nil {
return nil, nil, err
}
return ctx, req, nil
}
| 1 | 18,153 | we can drop this and use the returned body above, right? | yarpc-yarpc-go | go |
@@ -142,12 +142,12 @@ module Travis
def install
sh.if "$(rvm use $(travis_internal_ruby) do ruby -e \"puts RUBY_VERSION\") = 1.9*" do
- cmd(dpl_install_command(WANT_18), echo: false, assert: !allow_failure, timing: true)
+ cmd(dpl_install_command(WANT_18), echo: true, assert: !allow_failure, timing: true)
end
sh.else do
- cmd(dpl_install_command, echo: false, assert: !allow_failure, timing: true)
+ cmd(dpl_install_command, echo: true, assert: !allow_failure, timing: true)
end
- sh.cmd "rm -f dpl-*.gem", echo: false, assert: false, timing: false
+ sh.cmd "rm -f $TRAVIS_BUILD_DIR/dpl-*.gem", echo: false, assert: false, timing: false
end
def run_command(assert = !allow_failure) | 1 | require 'travis/build/addons/deploy/conditions'
require 'travis/build/addons/deploy/config'
module Travis
module Build
class Addons
class Deploy < Base
class Script
VERSIONED_RUNTIMES = %w(
d
dart
elixir
ghc
go
haxe
jdk
julia
mono
node
otp_release
perl
php
python
r
ruby
rust
scala
smalltalk
).map(&:to_sym)
WANT_18 = true # whether or not we want `dpl` < 1.9
attr_accessor :script, :sh, :data, :config, :allow_failure
def initialize(script, sh, data, config)
@script = script
@sh = sh
@data = data
@config = config
@silent = false
@allow_failure = config.delete(:allow_failure)
rescue
raise Travis::Build::DeployConfigError.new
end
def deploy
if data.pull_request
warning_message "the current build is a pull request."
return
end
if conditions.empty?
run
else
check_conditions_and_run
end
end
private
def check_conditions_and_run
sh.if(conditions) do
run
end
sh.else do
warning_message_unless(repo_condition, "this repo's name does not match one specified in .travis.yml's deploy.on.repo: #{on[:repo]}")
warning_message_unless(branch_condition, "this branch is not permitted")
warning_message_unless(runtime_conditions, "this is not on the required runtime")
warning_message_unless(custom_conditions, "a custom condition was not met")
warning_message_unless(tags_condition, "this is not a tagged commit")
end
end
def warning_message_unless(condition, message)
return if negate_condition(condition) == ""
sh.if(negate_condition(condition)) { warning_message(message) }
end
def on
@on ||= begin
on = config.delete(:if) || config.delete(:on) || config.delete(true) || config.delete(:true) || {}
on = { branch: on.to_str } if on.respond_to? :to_str
on[:ruby] ||= on[:rvm] if on.include? :rvm
on[:node] ||= on[:node_js] if on.include? :node_js
on
end
end
def conditions
[
repo_condition,
branch_condition,
runtime_conditions,
custom_conditions,
tags_condition,
].flatten.compact.map { |c| "(#{c})" }.join(" && ")
rescue TypeError => e
if e.message =~ /no implicit conversion of Symbol into Integer/
raise Travis::Build::DeployConditionError.new
end
end
def repo_condition
"$TRAVIS_REPO_SLUG = \"#{on[:repo]}\"" if on[:repo]
end
def branch_condition
return if on[:all_branches] || on[:tags]
branch_config = on[:branch].respond_to?(:keys) ? on[:branch].keys : on[:branch]
branches = Array(branch_config || default_branches)
branches.map { |b| "$TRAVIS_BRANCH = #{b}" }.join(' || ')
end
def tags_condition
case on[:tags]
when true then '"$TRAVIS_TAG" != ""'
when false then '"$TRAVIS_TAG" = ""'
end
end
def custom_conditions
on[:condition]
end
def runtime_conditions
(VERSIONED_RUNTIMES & on.keys).map { |runtime| "$TRAVIS_#{runtime.to_s.upcase}_VERSION = #{on[runtime].to_s.shellescape}" }
end
def run
sh.with_errexit_off do
script.stages.run_stage(:custom, :before_deploy)
sh.fold('dpl.0') { install }
cmd(run_command, echo: false, assert: false, timing: true)
script.stages.run_stage(:custom, :after_deploy)
end
end
def install
sh.if "$(rvm use $(travis_internal_ruby) do ruby -e \"puts RUBY_VERSION\") = 1.9*" do
cmd(dpl_install_command(WANT_18), echo: false, assert: !allow_failure, timing: true)
end
sh.else do
cmd(dpl_install_command, echo: false, assert: !allow_failure, timing: true)
end
sh.cmd "rm -f dpl-*.gem", echo: false, assert: false, timing: false
end
def run_command(assert = !allow_failure)
return "dpl #{options} --fold" unless assert
run_command(false) + "; " + die("failed to deploy")
end
def die(message)
'if [ $? -ne 0 ]; then echo %p; travis_terminate 2; fi' % message
end
def default_branches
default_branches = config.except(:edge).values.grep(Hash).map(&:keys).flatten(1).uniq.compact
default_branches.any? ? default_branches : 'master'
end
def option(key, value)
case value
when Array then value.map { |v| option(key, v) }
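            # A Hash maps branch names to option values, so resolve the value
            # for the branch being built.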
when Hash then option(key, value[data.branch.to_sym])
when true then "--#{key}"
when nil, false then nil
else "--%s=%p" % [key, value]
end
end
def cmd(cmd, *args)
sh.cmd('type rvm &>/dev/null || source ~/.rvm/scripts/rvm', echo: false, assert: false)
sh.cmd("rvm $(travis_internal_ruby) --fuzzy do ruby -S #{cmd}", *args)
end
def dpl_install_command(want_pre_19 = false)
edge = config[:edge]
if edge.respond_to? :fetch
src = edge.fetch(:source, 'travis-ci/dpl')
branch = edge.fetch(:branch, 'master')
build_gem_locally_from(src, branch)
end
command = "gem install dpl"
command << " -v '< 1.9' " if want_pre_19
command << "-*.gem --local" if edge == 'local' || edge.respond_to?(:fetch)
command << " --pre" if edge
command
end
def options
config.flat_map { |k,v| option(k,v) }.compact.join(" ")
end
def warning_message(message)
sh.echo "Skipping a deployment with the #{config[:provider]} provider because #{message}", ansi: :yellow
end
def negate_condition(conditions)
Array(conditions).flatten.compact.map { |condition| " ! (#{condition})" }.join(" && ")
end
def build_gem_locally_from(source, branch)
sh.echo "Building dpl gem locally with source #{source} and branch #{branch}", ansi: :yellow
sh.cmd("gem uninstall -a -x dpl >& /dev/null", echo: false, assert: !allow_failure, timing: false)
sh.cmd("pushd /tmp >& /dev/null", echo: false, assert: !allow_failure, timing: true)
sh.cmd("git clone https://github.com/#{source} #{source}", echo: true, assert: !allow_failure, timing: true)
sh.cmd("pushd #{source} >& /dev/null", echo: false, assert: !allow_failure, timing: true)
sh.cmd("git checkout #{branch}", echo: true, assert: !allow_failure, timing: true)
sh.cmd("git show-ref -s HEAD", echo: true, assert: !allow_failure, timing: true)
cmd("gem build dpl.gemspec", echo: true, assert: !allow_failure, timing: true)
sh.cmd("mv dpl-*.gem $TRAVIS_BUILD_DIR >& /dev/null", echo: false, assert: !allow_failure, timing: true)
sh.cmd("popd >& /dev/null", echo: false, assert: !allow_failure, timing: true)
# clean up, so that multiple edge providers can be run
sh.cmd("rm -rf $(dirname #{source})", echo: false, assert: !allow_failure, timing: true)
sh.cmd("popd >& /dev/null", echo: false, assert: !allow_failure, timing: true)
ensure
sh.cmd("test -e /tmp/dpl && rm -rf dpl", echo: false, assert: false, timing: true)
end
end
end
end
end
end
 | 1 | 15,826 | Is echoing enabled on purpose here, or is it a leftover from your tests? | travis-ci-travis-build | rb |
@@ -98,6 +98,7 @@ module Bolt
end
def run_command(targets, command, options = {}, &callback)
+ @logger.notice(options['_description']) if options.key?('_description')
@logger.info("Starting command run '#{command}' on #{targets.map(&:uri)}")
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as | 1 | # frozen_string_literal: true
# Used for $ERROR_INFO. This *must* be capitalized!
require 'English'
require 'json'
require 'concurrent'
require 'logging'
require 'bolt/result'
require 'bolt/config'
require 'bolt/notifier'
require 'bolt/result_set'
require 'bolt/puppetdb'
module Bolt
class Executor
attr_reader :noop, :transports
attr_accessor :run_as
def initialize(config = Bolt::Config.new, noop = nil, plan_logging = false)
@config = config
@logger = Logging.logger[self]
@transports = Bolt::TRANSPORTS.each_with_object({}) do |(key, val), coll|
coll[key.to_s] = Concurrent::Delay.new { val.new }
end
# If a specific elevated log level has been requested, honor that.
# Otherwise, escalate the log level to "info" if running in plan mode, so
# that certain progress messages will be visible.
default_log_level = plan_logging ? :info : :notice
@logger.level = @config[:log_level] || default_log_level
@noop = noop
@run_as = nil
@pool = Concurrent::CachedThreadPool.new(max_threads: @config[:concurrency])
@logger.debug { "Started with #{@config[:concurrency]} max thread(s)" }
@notifier = Bolt::Notifier.new
end
def transport(transport)
impl = @transports[transport || 'ssh']
# If there was an error creating the transport, ensure it gets thrown
impl.no_error!
impl.value
end
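    # Builds a one-line, correctly pluralized summary of an action's results,
    # e.g. "Ran command 'hostname' on 2 nodes with 1 failure".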
def summary(action, object, result)
fc = result.error_set.length
npl = result.length == 1 ? '' : 's'
fpl = fc == 1 ? '' : 's'
"Ran #{action} '#{object}' on #{result.length} node#{npl} with #{fc} failure#{fpl}"
end
private :summary
# Execute the given block on a list of nodes in parallel, one thread per "batch".
#
# This is the main driver of execution on a list of targets. It first
# groups targets by transport, then divides each group into batches as
# defined by the transport. Each batch, along with the corresponding
# transport, is yielded to the block in turn and the results all collected
# into a single ResultSet.
def batch_execute(targets)
promises = targets.group_by(&:protocol).flat_map do |protocol, protocol_targets|
transport = transport(protocol)
transport.batches(protocol_targets).flat_map do |batch|
batch_promises = Array(batch).each_with_object({}) do |target, h|
h[target] = Concurrent::Promise.new(executor: :immediate)
end
# Pass this argument through to avoid retaining a reference to a
# local variable that will change on the next iteration of the loop.
@pool.post(batch_promises) do |result_promises|
begin
results = yield transport, batch
Array(results).each do |result|
result_promises[result.target].set(result)
end
# NotImplementedError can be thrown if the transport is implemented improperly
rescue StandardError, NotImplementedError => e
result_promises.each do |target, promise|
promise.set(Bolt::Result.from_exception(target, e))
end
ensure
# Make absolutely sure every promise gets a result to avoid a
# deadlock. Use whatever exception is causing this block to
# execute, or generate one if we somehow got here without an
# exception and some promise is still missing a result.
result_promises.each do |target, promise|
next if promise.fulfilled?
error = $ERROR_INFO || Bolt::Error.new("No result was returned for #{target.uri}",
"puppetlabs.bolt/missing-result-error")
promise.set(Bolt::Result.from_exception(target, error))
end
end
end
batch_promises.values
end
end
ResultSet.new(promises.map(&:value))
end
def run_command(targets, command, options = {}, &callback)
@logger.info("Starting command run '#{command}' on #{targets.map(&:uri)}")
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
transport.batch_command(batch, command, options, ¬ify)
end
@logger.info(summary('command', command, results))
@notifier.shutdown
results
end
def run_script(targets, script, arguments, options = {}, &callback)
@logger.info("Starting script run #{script} on #{targets.map(&:uri)}")
@logger.debug("Arguments: #{arguments}")
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
transport.batch_script(batch, script, arguments, options, ¬ify)
end
@logger.info(summary('script', script, results))
@notifier.shutdown
results
end
def run_task(targets, task, arguments, options = {}, &callback)
task_name = task.name
@logger.info("Starting task #{task_name} on #{targets.map(&:uri)}")
@logger.debug("Arguments: #{arguments} Input method: #{task.input_method}")
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
transport.batch_task(batch, task, arguments, options, ¬ify)
end
@logger.info(summary('task', task_name, results))
@notifier.shutdown
results
end
def file_upload(targets, source, destination, options = {}, &callback)
@logger.info("Starting file upload from #{source} to #{destination} on #{targets.map(&:uri)}")
notify = proc { |event| @notifier.notify(callback, event) if callback }
options = { '_run_as' => run_as }.merge(options) if run_as
results = batch_execute(targets) do |transport, batch|
transport.batch_upload(batch, source, destination, options, ¬ify)
end
@logger.info(summary('upload', source, results))
@notifier.shutdown
results
end
def puppetdb_client
return @puppetdb_client if @puppetdb_client
puppetdb_config = Bolt::PuppetDB::Config.new(nil, @config.puppetdb)
@puppetdb_client = Bolt::PuppetDB::Client.from_config(puppetdb_config)
end
def puppetdb_fact(certnames)
puppetdb_client.facts_for_node(certnames)
rescue StandardError => e
raise Bolt::CLIError, "Could not retrieve targets from PuppetDB: #{e}"
end
end
end
| 1 | 8,427 | I think this should just be worked into the next message and follow the verbosity of it. | puppetlabs-bolt | rb |
@@ -106,6 +106,7 @@ export const selectors = {
*
* @since 1.14.0
*
+ * @param {Object} urlParams URL parameters to be passed to the query.
* @return {(string|undefined)} AdSense account site overview URL (or `undefined` if not loaded).
*/
getServiceAccountSiteURL: createRegistrySelector( ( select ) => () => { | 1 | /**
* `modules/adsense` data store: service.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { addQueryArgs } from '@wordpress/url';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { STORE_NAME } from './constants';
import { CORE_USER } from '../../../googlesitekit/datastore/user/constants';
import { CORE_SITE } from '../../../googlesitekit/datastore/site/constants';
import { parseDomain } from '../util/url';
const { createRegistrySelector } = Data;
export const selectors = {
/**
* Gets a URL to the service.
*
* @since 1.14.0
*
* @param {Object} state Data store's state.
* @param {Object} [args] Object containing optional path and query args.
* @param {string} [args.path] A path to append to the base url.
* @param {Object} [args.query] Object of query params to be added to the URL.
* @return {(string|undefined)} The URL to the service, or `undefined` if not loaded.
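	 *
	 * @example
	 * // Illustrative usage only (not from the original source); the path and
	 * // query values are made up.
	 * select( STORE_NAME ).getServiceURL( { path: 'pub-12345/home', query: { source: 'site-kit' } } );
	 * // => 'https://www.google.com/adsense/new/u/0/pub-12345/home?source=site-kit&authuser=<user email>'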
*/
getServiceURL: createRegistrySelector( ( select ) => ( state, { path, query } = {} ) => {
const userEmail = select( CORE_USER ).getEmail();
if ( userEmail === undefined ) {
return undefined;
}
const baseURI = 'https://www.google.com/adsense/new/u/0';
const queryParams = query ? { ...query, authuser: userEmail } : { authuser: userEmail };
if ( path ) {
const sanitizedPath = `/${ path.replace( /^\//, '' ) }`;
return addQueryArgs( `${ baseURI }${ sanitizedPath }`, queryParams );
}
return addQueryArgs( baseURI, queryParams );
} ),
/**
* Returns the service URL for creating a new AdSense account.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense URL to create a new account (or `undefined` if not loaded).
*/
getServiceCreateAccountURL: createRegistrySelector( ( select ) => () => {
const siteURL = select( CORE_SITE ).getReferenceSiteURL();
const query = {
source: 'site-kit',
utm_source: 'site-kit',
utm_medium: 'wordpress_signup',
};
if ( undefined !== siteURL ) {
query.url = siteURL;
}
return addQueryArgs( 'https://www.google.com/adsense/signup/new', query );
} ),
/**
* Returns the service URL to an AdSense account's overview page.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense account overview URL (or `undefined` if not loaded).
*/
getServiceAccountURL: createRegistrySelector( ( select ) => () => {
const accountID = select( STORE_NAME ).getAccountID();
if ( accountID === undefined ) {
return undefined;
}
const path = `${ accountID }/home`;
const query = { source: 'site-kit' };
return select( STORE_NAME ).getServiceURL( { path, query } );
} ),
/**
* Returns the service URL to an AdSense account's site overview page.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense account site overview URL (or `undefined` if not loaded).
*/
getServiceAccountSiteURL: createRegistrySelector( ( select ) => () => {
const accountID = select( STORE_NAME ).getAccountID();
const siteURL = select( CORE_SITE ).getReferenceSiteURL();
if ( accountID === undefined || siteURL === undefined ) {
return undefined;
}
const path = `${ accountID }/home`;
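		// Use just the parsed domain for the `url` param when possible, falling back to the full reference URL.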
const query = {
source: 'site-kit',
url: parseDomain( siteURL ) || siteURL,
};
return select( STORE_NAME ).getServiceURL( { path, query } );
} ),
/**
* Returns the service URL to an AdSense account's site management page.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense account site management URL (or `undefined` if not loaded).
*/
getServiceAccountManageSiteURL: createRegistrySelector( ( select ) => () => {
const accountID = select( STORE_NAME ).getAccountID();
const siteURL = select( CORE_SITE ).getReferenceSiteURL();
if ( accountID === undefined || siteURL === undefined ) {
return undefined;
}
const path = `${ accountID }/sites/my-sites`;
const query = {
source: 'site-kit',
url: parseDomain( siteURL ) || siteURL,
};
return select( STORE_NAME ).getServiceURL( { path, query } );
} ),
/**
* Returns the service URL to the AdSense sites list.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense account sites list URL (or `undefined` if not loaded).
*/
	getServiceAccountManageSitesURL: createRegistrySelector( ( select ) => () => {
const accountID = select( STORE_NAME ).getAccountID();
if ( accountID === undefined ) {
return undefined;
}
const path = `${ accountID }/sites/my-sites`;
const query = { source: 'site-kit' };
return select( STORE_NAME ).getServiceURL( { path, query } );
} ),
/**
* Returns the service URL to an AdSense account's site ads preview page.
*
* @since 1.14.0
*
* @return {(string|undefined)} AdSense account site ads preview URL (or `undefined` if not loaded).
*/
getServiceAccountSiteAdsPreviewURL: createRegistrySelector( ( select ) => () => {
const accountID = select( STORE_NAME ).getAccountID();
const siteURL = select( CORE_SITE ).getReferenceSiteURL();
if ( accountID === undefined || siteURL === undefined ) {
return undefined;
}
const path = `${ accountID }/myads/sites/preview`;
const query = {
source: 'site-kit',
url: parseDomain( siteURL ) || siteURL,
};
return select( STORE_NAME ).getServiceURL( { path, query } );
} ),
};
const store = {
selectors,
};
export default store;
| 1 | 35,906 | This also needs to be reverted. | google-site-kit-wp | js |
@@ -105,6 +105,13 @@ const (
TLFJournalBackgroundWorkEnabled
)
+type tlfJournalPauseType int
+
+const (
+ journalPausedFromConflict tlfJournalPauseType = 1 << iota
+ journalPausedFromSignal
+)
+
func (bws TLFJournalBackgroundWorkStatus) String() string {
switch bws {
case TLFJournalBackgroundWorkEnabled: | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"path/filepath"
"runtime"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// tlfJournalConfig is the subset of the Config interface needed by
// tlfJournal (for ease of testing).
type tlfJournalConfig interface {
BlockSplitter() BlockSplitter
Clock() Clock
Codec() kbfscodec.Codec
Crypto() Crypto
BlockCache() BlockCache
BlockOps() BlockOps
MDCache() MDCache
MetadataVersion() MetadataVer
Reporter() Reporter
encryptionKeyGetter() encryptionKeyGetter
mdDecryptionKeyGetter() mdDecryptionKeyGetter
MDServer() MDServer
usernameGetter() normalizedUsernameGetter
MakeLogger(module string) logger.Logger
}
// tlfJournalConfigAdapter is an adapter for Config objects to the
// tlfJournalConfig interface.
type tlfJournalConfigAdapter struct {
Config
}
func (ca tlfJournalConfigAdapter) encryptionKeyGetter() encryptionKeyGetter {
return ca.Config.KeyManager()
}
func (ca tlfJournalConfigAdapter) mdDecryptionKeyGetter() mdDecryptionKeyGetter {
return ca.Config.KeyManager()
}
func (ca tlfJournalConfigAdapter) usernameGetter() normalizedUsernameGetter {
return ca.Config.KBPKI()
}
const (
// Maximum number of blocks that can be flushed in a single batch
// by the journal. TODO: make this configurable, so that users
// can choose how much bandwidth is used by the journal.
maxJournalBlockFlushBatchSize = 25
// This will be the final entry for unflushed paths if there are
// too many revisions to process at once.
incompleteUnflushedPathsMarker = "..."
// ForcedBranchSquashThreshold is the minimum number of MD
// revisions in the journal that will trigger an automatic branch
// conversion (and subsequent resolution).
ForcedBranchSquashThreshold = 20
// Maximum number of blocks to delete from the local saved block
// journal at a time while holding the lock.
maxSavedBlockRemovalsAtATime = uint64(500)
)
// TLFJournalStatus represents the status of a TLF's journal for
// display in diagnostics. It is suitable for encoding directly as
// JSON.
type TLFJournalStatus struct {
Dir string
RevisionStart MetadataRevision
RevisionEnd MetadataRevision
BranchID string
BlockOpCount uint64
UnflushedBytes int64 // (signed because os.FileInfo.Size() is signed)
UnflushedPaths []string
LastFlushErr string `json:",omitempty"`
}
// TLFJournalBackgroundWorkStatus indicates whether a journal should
// be doing background work or not.
type TLFJournalBackgroundWorkStatus int
const (
// TLFJournalBackgroundWorkPaused indicates that the journal
// should not currently be doing background work.
TLFJournalBackgroundWorkPaused TLFJournalBackgroundWorkStatus = iota
// TLFJournalBackgroundWorkEnabled indicates that the journal
// should be doing background work.
TLFJournalBackgroundWorkEnabled
)
func (bws TLFJournalBackgroundWorkStatus) String() string {
switch bws {
case TLFJournalBackgroundWorkEnabled:
return "Background work enabled"
case TLFJournalBackgroundWorkPaused:
return "Background work paused"
default:
return fmt.Sprintf("TLFJournalBackgroundWorkStatus(%d)", bws)
}
}
// bwState indicates the state of the background work goroutine.
type bwState int
const (
bwBusy bwState = iota
bwIdle
bwPaused
)
func (bws bwState) String() string {
switch bws {
case bwBusy:
return "bwBusy"
case bwIdle:
return "bwIdle"
case bwPaused:
return "bwPaused"
default:
return fmt.Sprintf("bwState(%d)", bws)
}
}
// tlfJournalBWDelegate is used by tests to know what the background
// goroutine is doing, and also to enforce a timeout (via the
// context).
type tlfJournalBWDelegate interface {
GetBackgroundContext() context.Context
OnNewState(ctx context.Context, bws bwState)
OnShutdown(ctx context.Context)
}
// A tlfJournal contains all the journals for a (TLF, user, device)
// tuple and controls the synchronization between the objects that are
// adding to those journals (via journalBlockServer or journalMDOps)
// and a background goroutine that flushes journal entries to the
// servers.
//
// The maximum number of characters added to the root dir by a TLF
// journal is 51, which is just the max of the block journal and MD
// journal numbers.
type tlfJournal struct {
uid keybase1.UID
key kbfscrypto.VerifyingKey
tlfID tlf.ID
dir string
config tlfJournalConfig
delegateBlockServer BlockServer
log logger.Logger
deferLog logger.Logger
onBranchChange branchChangeListener
onMDFlush mdFlushListener
// All the channels below are used as simple on/off
	// signals. They're buffered with a capacity of one, and all sends are
// asynchronous, so multiple sends get collapsed into one
// signal.
hasWorkCh chan struct{}
needPauseCh chan struct{}
needResumeCh chan struct{}
needShutdownCh chan struct{}
// This channel is closed when background work shuts down.
backgroundShutdownCh chan struct{}
// Serializes all flushes.
flushLock sync.Mutex
// Tracks background work.
wg kbfssync.RepeatedWaitGroup
// Protects all operations on blockJournal and mdJournal.
//
// TODO: Consider using https://github.com/pkg/singlefile
// instead.
journalLock sync.RWMutex
// both of these are nil after shutdown() is called.
blockJournal *blockJournal
mdJournal *mdJournal
disabled bool
lastFlushErr error
unflushedPaths unflushedPathCache
bwDelegate tlfJournalBWDelegate
}
func getTLFJournalInfoFilePath(dir string) string {
return filepath.Join(dir, "info.json")
}
// tlfJournalInfo is the structure stored in
// getTLFJournalInfoFilePath(dir).
type tlfJournalInfo struct {
UID keybase1.UID
VerifyingKey kbfscrypto.VerifyingKey
TlfID tlf.ID
}
func readTLFJournalInfoFile(dir string) (
keybase1.UID, kbfscrypto.VerifyingKey, tlf.ID, error) {
var info tlfJournalInfo
err := ioutil.DeserializeFromJSONFile(
getTLFJournalInfoFilePath(dir), &info)
if err != nil {
return keybase1.UID(""), kbfscrypto.VerifyingKey{}, tlf.ID{}, err
}
return info.UID, info.VerifyingKey, info.TlfID, nil
}
func writeTLFJournalInfoFile(dir string, uid keybase1.UID,
key kbfscrypto.VerifyingKey, tlfID tlf.ID) error {
info := tlfJournalInfo{uid, key, tlfID}
return ioutil.SerializeToJSONFile(info, getTLFJournalInfoFilePath(dir))
}
func makeTLFJournal(
ctx context.Context, uid keybase1.UID, key kbfscrypto.VerifyingKey,
dir string, tlfID tlf.ID, config tlfJournalConfig,
delegateBlockServer BlockServer, bws TLFJournalBackgroundWorkStatus,
bwDelegate tlfJournalBWDelegate, onBranchChange branchChangeListener,
onMDFlush mdFlushListener) (*tlfJournal, error) {
if uid == keybase1.UID("") {
return nil, errors.New("Empty user")
}
if key == (kbfscrypto.VerifyingKey{}) {
return nil, errors.New("Empty verifying key")
}
if tlfID == (tlf.ID{}) {
return nil, errors.New("Empty tlf.ID")
}
readUID, readKey, readTlfID, err := readTLFJournalInfoFile(dir)
switch {
case ioutil.IsNotExist(err):
// Info file doesn't exist, so write it.
err := writeTLFJournalInfoFile(dir, uid, key, tlfID)
if err != nil {
return nil, err
}
case err != nil:
return nil, err
default:
// Info file exists, so it should match passed-in
// parameters.
if uid != readUID {
return nil, errors.Errorf(
"Expected UID %s, got %s", uid, readUID)
}
if key != readKey {
return nil, errors.Errorf(
"Expected verifying key %s, got %s",
key, readKey)
}
if tlfID != readTlfID {
return nil, errors.Errorf(
"Expected TLF ID %s, got %s", tlfID, readTlfID)
}
}
log := config.MakeLogger("TLFJ")
blockJournal, err := makeBlockJournal(ctx, config.Codec(), dir, log)
if err != nil {
return nil, err
}
mdJournal, err := makeMDJournal(
ctx, uid, key, config.Codec(), config.Crypto(), config.Clock(),
tlfID, config.MetadataVersion(), dir, log)
if err != nil {
return nil, err
}
j := &tlfJournal{
uid: uid,
key: key,
tlfID: tlfID,
dir: dir,
config: config,
delegateBlockServer: delegateBlockServer,
log: log,
deferLog: log.CloneWithAddedDepth(1),
onBranchChange: onBranchChange,
onMDFlush: onMDFlush,
hasWorkCh: make(chan struct{}, 1),
needPauseCh: make(chan struct{}, 1),
needResumeCh: make(chan struct{}, 1),
needShutdownCh: make(chan struct{}, 1),
backgroundShutdownCh: make(chan struct{}),
blockJournal: blockJournal,
mdJournal: mdJournal,
bwDelegate: bwDelegate,
}
go j.doBackgroundWorkLoop(bws, backoff.NewExponentialBackOff())
// Signal work to pick up any existing journal entries.
j.signalWork()
j.log.CDebugf(ctx, "Enabled journal for %s with path %s", tlfID, dir)
return j, nil
}
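// signalWork signals the background goroutine that there is work to do. The
// send is non-blocking: if a signal is already pending, the wait-group count
// added here is released again so that collapsed signals stay balanced.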
func (j *tlfJournal) signalWork() {
j.wg.Add(1)
select {
case j.hasWorkCh <- struct{}{}:
default:
j.wg.Done()
}
}
// CtxJournalTagKey is the type used for unique context tags within
// background journal work.
type CtxJournalTagKey int
const (
// CtxJournalIDKey is the type of the tag for unique operation IDs
// within background journal work.
CtxJournalIDKey CtxJournalTagKey = iota
)
// CtxJournalOpID is the display name for the unique operation ID tag used
// within background journal work.
const CtxJournalOpID = "JID"
// doBackgroundWorkLoop is the main function for the background
// goroutine. It spawns off a worker goroutine to call
// doBackgroundWork whenever there is work, and can be paused and
// resumed.
func (j *tlfJournal) doBackgroundWorkLoop(
bws TLFJournalBackgroundWorkStatus, retry backoff.BackOff) {
ctx := context.Background()
if j.bwDelegate != nil {
ctx = j.bwDelegate.GetBackgroundContext()
}
// Non-nil when a retry has been scheduled for the future.
var retryTimer *time.Timer
defer func() {
close(j.backgroundShutdownCh)
if j.bwDelegate != nil {
j.bwDelegate.OnShutdown(ctx)
}
if retryTimer != nil {
retryTimer.Stop()
}
}()
// Below we have a state machine with three states:
//
// 1) Idle, where we wait for new work or to be paused;
// 2) Busy, where we wait for the worker goroutine to
// finish, or to be paused;
// 3) Paused, where we wait to be resumed.
//
// We run this state machine until we are shutdown. Also, if
// we exit the busy state for any reason other than the worker
// goroutine finished, we stop the worker goroutine (via
// bwCancel below).
// errCh and bwCancel are non-nil only when we're in the busy
// state. errCh is the channel on which we receive the error
// from the worker goroutine, and bwCancel is the CancelFunc
// corresponding to the context passed to the worker
// goroutine.
var errCh <-chan error
var bwCancel context.CancelFunc
// Handle the case where we panic while in the busy state.
defer func() {
if bwCancel != nil {
bwCancel()
}
}()
for {
ctx := ctxWithRandomIDReplayable(ctx, CtxJournalIDKey, CtxJournalOpID,
j.log)
switch {
case bws == TLFJournalBackgroundWorkEnabled && errCh == nil:
// If we're now on a branch, pause. This will pause
// until PruneBranch or ResolveBranch is called.
isConflict, err := j.isOnConflictBranch()
if err != nil {
j.log.CDebugf(ctx, "Couldn't get conflict status: %v", err)
return
}
if isConflict {
j.log.CDebugf(ctx,
"Pausing work while on conflict branch for %s", j.tlfID)
bws = TLFJournalBackgroundWorkPaused
break
}
// 1) Idle.
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwIdle)
}
j.log.CDebugf(
ctx, "Waiting for the work signal for %s",
j.tlfID)
select {
case <-j.hasWorkCh:
j.log.CDebugf(ctx, "Got work signal for %s", j.tlfID)
if retryTimer != nil {
retryTimer.Stop()
retryTimer = nil
}
bwCtx, cancel := context.WithCancel(ctx)
errCh = j.doBackgroundWork(bwCtx)
bwCancel = cancel
case <-j.needPauseCh:
j.log.CDebugf(ctx,
"Got pause signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkPaused
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
return
}
case bws == TLFJournalBackgroundWorkEnabled && errCh != nil:
// 2) Busy.
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwBusy)
}
j.log.CDebugf(ctx,
"Waiting for background work to be done for %s",
j.tlfID)
needShutdown := false
select {
case err := <-errCh:
if retryTimer != nil {
panic("Retry timer should be nil after work is done")
}
if err != nil {
j.log.CWarningf(ctx,
"Background work error for %s: %v",
j.tlfID, err)
bTime := retry.NextBackOff()
if bTime != backoff.Stop {
j.log.CWarningf(ctx, "Retrying in %s", bTime)
retryTimer = time.AfterFunc(bTime, j.signalWork)
}
} else {
retry.Reset()
}
case <-j.needPauseCh:
j.log.CDebugf(ctx,
"Got pause signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkPaused
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
needShutdown = true
}
errCh = nil
// Cancel the worker goroutine as we exit this
// state.
bwCancel()
bwCancel = nil
if needShutdown {
return
}
case bws == TLFJournalBackgroundWorkPaused:
// 3) Paused
j.wg.Pause()
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwPaused)
}
j.log.CDebugf(
ctx, "Waiting to resume background work for %s",
j.tlfID)
select {
case <-j.needResumeCh:
j.log.CDebugf(ctx,
"Got resume signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkEnabled
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
return
}
default:
j.log.CErrorf(
ctx, "Unknown TLFJournalBackgroundStatus %s",
bws)
return
}
}
}
// doBackgroundWork currently only does auto-flushing. It assumes that
// ctx is canceled when the background processing should stop.
//
// TODO: Handle garbage collection too.
func (j *tlfJournal) doBackgroundWork(ctx context.Context) <-chan error {
errCh := make(chan error, 1)
// TODO: Handle panics.
go func() {
defer j.wg.Done()
errCh <- j.flush(ctx)
}()
return errCh
}
// We don't guarantee that pause/resume requests will be processed in
// strict FIFO order. In particular, multiple pause requests are
// collapsed into one (also multiple resume requests), so it's
// possible that a pause-resume-pause sequence will be processed as
// pause-resume. But that's okay, since these are just for infrequent
// ad-hoc testing.
func (j *tlfJournal) pauseBackgroundWork() {
select {
case j.needPauseCh <- struct{}{}:
default:
}
}
func (j *tlfJournal) resumeBackgroundWork() {
select {
case j.needResumeCh <- struct{}{}:
// Resume the wait group right away, so future callers will block
// even before the background goroutine picks up this signal.
j.wg.Resume()
default:
}
}
func (j *tlfJournal) checkEnabledLocked() error {
if j.blockJournal == nil || j.mdJournal == nil {
return errors.WithStack(errTLFJournalShutdown{})
}
if j.disabled {
return errors.WithStack(errTLFJournalDisabled{})
}
return nil
}
func (j *tlfJournal) getJournalEnds(ctx context.Context) (
blockEnd journalOrdinal, mdEnd MetadataRevision, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, MetadataRevisionUninitialized, err
}
blockEnd, err = j.blockJournal.end()
if err != nil {
return 0, 0, err
}
mdEnd, err = j.mdJournal.end()
if err != nil {
return 0, 0, err
}
return blockEnd, mdEnd, nil
}
func (j *tlfJournal) flush(ctx context.Context) (err error) {
j.flushLock.Lock()
defer j.flushLock.Unlock()
flushedBlockEntries := 0
flushedMDEntries := 0
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Flushed %d block entries and %d MD entries "+
"for %s, but got error %v",
flushedBlockEntries, flushedMDEntries,
j.tlfID, err)
}
j.journalLock.Lock()
j.lastFlushErr = err
j.journalLock.Unlock()
}()
// TODO: Avoid starving flushing MD ops if there are many
// block ops. See KBFS-1502.
for {
isConflict, err := j.isOnConflictBranch()
if err != nil {
return err
}
if isConflict {
j.log.CDebugf(ctx, "Ignoring flush while on conflict branch")
return nil
}
converted, err := j.convertMDsToBranchIfOverThreshold(ctx)
if err != nil {
return err
}
if converted {
return nil
}
blockEnd, mdEnd, err := j.getJournalEnds(ctx)
if err != nil {
return err
}
if blockEnd == 0 && mdEnd == MetadataRevisionUninitialized {
j.log.CDebugf(ctx, "Nothing else to flush")
break
}
j.log.CDebugf(ctx, "Flushing up to blockEnd=%d and mdEnd=%d",
blockEnd, mdEnd)
// Flush the block journal ops in parallel.
numFlushed, maxMDRevToFlush, err := j.flushBlockEntries(ctx, blockEnd)
if err != nil {
return err
}
flushedBlockEntries += numFlushed
if numFlushed == 0 {
// There were no blocks to flush, so we can flush all of
// the remaining MDs.
maxMDRevToFlush = mdEnd
}
// TODO: Flush MDs in batch.
for {
flushed, err := j.flushOneMDOp(ctx, mdEnd, maxMDRevToFlush)
if err != nil {
return err
}
if !flushed {
break
}
flushedMDEntries++
}
}
j.log.CDebugf(ctx, "Flushed %d block entries and %d MD entries for %s",
flushedBlockEntries, flushedMDEntries, j.tlfID)
return nil
}
type errTLFJournalShutdown struct{}
func (e errTLFJournalShutdown) Error() string {
return "tlfJournal is shutdown"
}
type errTLFJournalDisabled struct{}
func (e errTLFJournalDisabled) Error() string {
return "tlfJournal is disabled"
}
type errTLFJournalNotEmpty struct{}
func (e errTLFJournalNotEmpty) Error() string {
return "tlfJournal is not empty"
}
func (j *tlfJournal) getNextBlockEntriesToFlush(
ctx context.Context, end journalOrdinal) (
entries blockEntriesToFlush, maxMDRevToFlush MetadataRevision, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return blockEntriesToFlush{}, MetadataRevisionUninitialized, err
}
return j.blockJournal.getNextEntriesToFlush(ctx, end,
maxJournalBlockFlushBatchSize)
}
func (j *tlfJournal) removeFlushedBlockEntries(ctx context.Context,
entries blockEntriesToFlush) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
// Keep the flushed blocks around until we know for sure the MD
// flush will succeed; otherwise if we become unmerged, conflict
// resolution will be very expensive.
err := j.blockJournal.saveBlocksUntilNextMDFlush()
if err != nil {
return err
}
return j.blockJournal.removeFlushedEntries(ctx, entries, j.tlfID,
j.config.Reporter())
}
func (j *tlfJournal) flushBlockEntries(
ctx context.Context, end journalOrdinal) (int, MetadataRevision, error) {
entries, maxMDRevToFlush, err := j.getNextBlockEntriesToFlush(ctx, end)
if err != nil {
return 0, MetadataRevisionUninitialized, err
}
if entries.length() == 0 {
return 0, maxMDRevToFlush, nil
}
// TODO: fill this in for logging/error purposes.
var tlfName CanonicalTlfName
err = flushBlockEntries(ctx, j.log, j.delegateBlockServer,
j.config.BlockCache(), j.config.Reporter(),
j.tlfID, tlfName, entries)
if err != nil {
return 0, MetadataRevisionUninitialized, err
}
err = j.removeFlushedBlockEntries(ctx, entries)
if err != nil {
return 0, MetadataRevisionUninitialized, err
}
return entries.length(), maxMDRevToFlush, nil
}
func (j *tlfJournal) getNextMDEntryToFlush(ctx context.Context,
end MetadataRevision) (MdID, *RootMetadataSigned, ExtraMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, nil, nil, err
}
return j.mdJournal.getNextEntryToFlush(ctx, end, j.config.Crypto())
}
func (j *tlfJournal) convertMDsToBranchLocked(
ctx context.Context, bid BranchID) error {
err := j.mdJournal.convertToBranch(
ctx, bid, j.config.Crypto(), j.config.Codec(), j.tlfID,
j.config.MDCache())
if err != nil {
return err
}
if j.onBranchChange != nil {
j.onBranchChange.onTLFBranchChange(j.tlfID, bid)
}
return nil
}
func (j *tlfJournal) convertMDsToBranch(ctx context.Context) error {
bid, err := j.config.Crypto().MakeRandomBranchID()
if err != nil {
return err
}
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
return j.convertMDsToBranchLocked(ctx, bid)
}
func (j *tlfJournal) convertMDsToBranchIfOverThreshold(ctx context.Context) (
bool, error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
ok, err := j.mdJournal.atLeastNNonLocalSquashes(ForcedBranchSquashThreshold)
if err != nil {
return false, err
}
if !ok {
// Most of what's in the log are already local squashes, so no
// need to squash it more.
return false, nil
}
j.log.CDebugf(ctx, "Converting journal with more than %d "+
"non-local-squash entries to a branch", ForcedBranchSquashThreshold)
err = j.convertMDsToBranchLocked(ctx, PendingLocalSquashBranchID)
if err != nil {
return false, err
}
return true, nil
}
func (j *tlfJournal) doOnMDFlush(ctx context.Context,
rmds *RootMetadataSigned) error {
if j.onMDFlush != nil {
j.onMDFlush.onMDFlush(rmds.MD.TlfID(), rmds.MD.BID(),
rmds.MD.RevisionNumber())
}
// Remove saved blocks in chunks to avoid starving foreground file
// system operations that need the lock for too long.
lastToRemove := journalOrdinal(0)
for {
err := func() error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
var err error
lastToRemove, err = j.blockJournal.onMDFlush(
ctx, maxSavedBlockRemovalsAtATime, lastToRemove)
if err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
if lastToRemove == 0 {
break
}
// Explicitly allow other goroutines (such as foreground file
// system operations) to grab the lock to avoid starvation.
// See https://github.com/golang/go/issues/13086.
runtime.Gosched()
}
return nil
}
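// exampleChunkedWorkUnderLock is an editor-added illustrative sketch (not part
// of the original file) of the pattern used by doOnMDFlush above: do a bounded
// chunk of work while holding the lock, release it, then yield with
// runtime.Gosched() so other waiters (such as foreground file system
// operations) are not starved; see https://github.com/golang/go/issues/13086.
func exampleChunkedWorkUnderLock(
	lock, unlock func(), processChunk func() (done bool)) {
	for {
		lock()
		done := processChunk()
		unlock()
		if done {
			return
		}
		runtime.Gosched()
	}
}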
func (j *tlfJournal) removeFlushedMDEntry(ctx context.Context,
mdID MdID, rmds *RootMetadataSigned) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
if err := j.mdJournal.removeFlushedEntry(ctx, mdID, rmds); err != nil {
return err
}
j.unflushedPaths.removeFromCache(rmds.MD.RevisionNumber())
return nil
}
func (j *tlfJournal) flushOneMDOp(
ctx context.Context, end MetadataRevision,
maxMDRevToFlush MetadataRevision) (flushed bool, err error) {
j.log.CDebugf(ctx, "Flushing one MD to server")
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx, "Flush failed with %v", err)
}
}()
mdServer := j.config.MDServer()
mdID, rmds, extra, err := j.getNextMDEntryToFlush(ctx, end)
if err != nil {
return false, err
}
if mdID == (MdID{}) {
return false, nil
}
// Only flush MDs for which the blocks have been fully flushed.
if rmds.MD.RevisionNumber() > maxMDRevToFlush {
j.log.CDebugf(ctx, "Haven't flushed all the blocks for TLF=%s "+
"with id=%s, rev=%s, bid=%s yet (maxMDRevToFlush=%d)",
rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID(),
maxMDRevToFlush)
return false, nil
}
j.log.CDebugf(ctx, "Flushing MD for TLF=%s with id=%s, rev=%s, bid=%s",
rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID())
pushErr := mdServer.Put(ctx, rmds, extra)
if isRevisionConflict(pushErr) {
headMdID, err := getMdID(ctx, mdServer, j.config.Crypto(),
rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.MergedStatus(),
rmds.MD.RevisionNumber())
if err != nil {
j.log.CWarningf(ctx,
"getMdID failed for TLF %s, BID %s, and revision %d: %v",
rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.RevisionNumber(), err)
} else if headMdID == mdID {
if headMdID == (MdID{}) {
panic("nil earliestID and revision conflict error returned by pushEarliestToServer")
}
// We must have already flushed this MD, so continue.
pushErr = nil
} else if rmds.MD.MergedStatus() == Merged {
j.log.CDebugf(ctx, "Conflict detected %v", pushErr)
// Convert MDs to a branch and return -- the journal
// pauses until the resolution is complete.
err = j.convertMDsToBranch(ctx)
if err != nil {
return false, err
}
return false, nil
}
}
if pushErr != nil {
return false, pushErr
}
err = j.doOnMDFlush(ctx, rmds)
if err != nil {
return false, err
}
err = j.removeFlushedMDEntry(ctx, mdID, rmds)
if err != nil {
return false, err
}
return true, nil
}
func (j *tlfJournal) getJournalEntryCounts() (
blockEntryCount, mdEntryCount uint64, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, 0, err
}
blockEntryCount, err = j.blockJournal.length()
if err != nil {
return 0, 0, err
}
mdEntryCount, err = j.mdJournal.length()
if err != nil {
return 0, 0, err
}
return blockEntryCount, mdEntryCount, nil
}
func (j *tlfJournal) isOnConflictBranch() (bool, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
return j.mdJournal.getBranchID() != NullBranchID, nil
}
func (j *tlfJournal) getJournalStatusLocked() (TLFJournalStatus, error) {
if err := j.checkEnabledLocked(); err != nil {
return TLFJournalStatus{}, err
}
earliestRevision, err := j.mdJournal.readEarliestRevision()
if err != nil {
return TLFJournalStatus{}, err
}
latestRevision, err := j.mdJournal.readLatestRevision()
if err != nil {
return TLFJournalStatus{}, err
}
blockEntryCount, err := j.blockJournal.length()
if err != nil {
return TLFJournalStatus{}, err
}
lastFlushErr := ""
if j.lastFlushErr != nil {
lastFlushErr = j.lastFlushErr.Error()
}
unflushedBytes := j.blockJournal.getUnflushedBytes()
return TLFJournalStatus{
Dir: j.dir,
BranchID: j.mdJournal.getBranchID().String(),
RevisionStart: earliestRevision,
RevisionEnd: latestRevision,
BlockOpCount: blockEntryCount,
UnflushedBytes: unflushedBytes,
LastFlushErr: lastFlushErr,
}, nil
}
func (j *tlfJournal) getJournalStatus() (TLFJournalStatus, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
return j.getJournalStatusLocked()
}
// getJournalStatusWithRange returns the journal status, along with
// either a non-nil unflushedPathsMap, which can be used directly to
// fill in UnflushedPaths, or a list of ImmutableBareRootMetadatas
// (together with a bool indicating whether that list is complete),
// which can be used to build an unflushedPathsMap.  If complete is
// true, then the list may be empty; otherwise, it is guaranteed to
// not be empty.
func (j *tlfJournal) getJournalStatusWithRange() (
jStatus TLFJournalStatus, unflushedPaths unflushedPathsMap,
ibrmds []ImmutableBareRootMetadata, complete bool, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
jStatus, err = j.getJournalStatusLocked()
if err != nil {
return TLFJournalStatus{}, nil, nil, false, err
}
unflushedPaths = j.unflushedPaths.getUnflushedPaths()
if unflushedPaths != nil {
return jStatus, unflushedPaths, nil, true, nil
}
if jStatus.RevisionEnd == MetadataRevisionUninitialized {
return jStatus, nil, nil, true, nil
}
complete = true
stop := jStatus.RevisionEnd
if stop > jStatus.RevisionStart+1000 {
stop = jStatus.RevisionStart + 1000
complete = false
}
// It would be nice to avoid getting this range if we are not
// the initializer, but at this point we don't know if we'll
// need to initialize or not.
ibrmds, err = j.mdJournal.getRange(j.mdJournal.branchID,
jStatus.RevisionStart, stop)
if err != nil {
return TLFJournalStatus{}, nil, nil, false, err
}
return jStatus, nil, ibrmds, complete, nil
}
// getUnflushedPathMDInfos converts the given list of bare root
// metadatas into unflushedPathMDInfo objects. The caller must NOT
// hold `j.journalLock`, because blocks from the journal may need to
// be read as part of the decryption.
func (j *tlfJournal) getUnflushedPathMDInfos(ctx context.Context,
ibrmds []ImmutableBareRootMetadata) ([]unflushedPathMDInfo, error) {
if len(ibrmds) == 0 {
return nil, nil
}
ibrmdBareHandle, err := ibrmds[0].MakeBareTlfHandleWithExtra()
if err != nil {
return nil, err
}
handle, err := MakeTlfHandle(
ctx, ibrmdBareHandle, j.config.usernameGetter())
if err != nil {
return nil, err
}
mdInfos := make([]unflushedPathMDInfo, 0, len(ibrmds))
for _, ibrmd := range ibrmds {
// TODO: Avoid having to do this type assertion and
// convert to RootMetadata.
brmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata)
if !ok {
return nil, MutableBareRootMetadataNoImplError{}
}
rmd := makeRootMetadata(brmd, ibrmd.extra, handle)
pmd, err := decryptMDPrivateData(
ctx, j.config.Codec(), j.config.Crypto(),
j.config.BlockCache(), j.config.BlockOps(),
j.config.mdDecryptionKeyGetter(), j.uid,
rmd.GetSerializedPrivateMetadata(), rmd, rmd, j.log)
if err != nil {
return nil, err
}
mdInfo := unflushedPathMDInfo{
revision: ibrmd.RevisionNumber(),
kmd: rmd,
pmd: pmd,
localTimestamp: ibrmd.localTimestamp,
}
mdInfos = append(mdInfos, mdInfo)
}
return mdInfos, nil
}
func (j *tlfJournal) getJournalStatusWithPaths(ctx context.Context,
cpp chainsPathPopulator) (jStatus TLFJournalStatus, err error) {
// This loop is limited only by the lifetime of `ctx`.
var unflushedPaths unflushedPathsMap
var complete bool
for {
var ibrmds []ImmutableBareRootMetadata
jStatus, unflushedPaths, ibrmds, complete, err =
j.getJournalStatusWithRange()
if err != nil {
return TLFJournalStatus{}, err
}
if unflushedPaths != nil {
break
}
// We need to make or initialize the unflushed paths.
if !complete {
// Figure out the paths for the truncated MD range,
// but don't cache it.
unflushedPaths = make(unflushedPathsMap)
j.log.CDebugf(ctx, "Making incomplete unflushed path cache")
mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds)
if err != nil {
return TLFJournalStatus{}, err
}
err = addUnflushedPaths(ctx, j.uid, j.key,
j.config.Codec(), j.log, mdInfos, cpp,
unflushedPaths)
if err != nil {
return TLFJournalStatus{}, err
}
break
}
// We need to init it ourselves, or wait for someone else
// to do it.
doInit, err := j.unflushedPaths.startInitializeOrWait(ctx)
if err != nil {
return TLFJournalStatus{}, err
}
if doInit {
initSuccess := false
defer func() {
if !initSuccess || err != nil {
j.unflushedPaths.abortInitialization()
}
}()
mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds)
if err != nil {
return TLFJournalStatus{}, err
}
unflushedPaths, initSuccess, err = j.unflushedPaths.initialize(
ctx, j.uid, j.key, j.config.Codec(),
j.log, cpp, mdInfos)
if err != nil {
return TLFJournalStatus{}, err
}
// All done!
break
}
j.log.CDebugf(ctx, "Waited for unflushed paths initialization, "+
"trying again to get the status")
}
pathsSeen := make(map[string]bool)
for _, revPaths := range unflushedPaths {
for path := range revPaths {
if !pathsSeen[path] {
jStatus.UnflushedPaths = append(jStatus.UnflushedPaths, path)
pathsSeen[path] = true
}
}
}
if !complete {
jStatus.UnflushedPaths =
append(jStatus.UnflushedPaths, incompleteUnflushedPathsMarker)
}
return jStatus, nil
}
func (j *tlfJournal) getUnflushedBytes() (int64, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, err
}
return j.blockJournal.getUnflushedBytes(), nil
}
func (j *tlfJournal) shutdown() {
select {
case j.needShutdownCh <- struct{}{}:
default:
}
<-j.backgroundShutdownCh
// This may happen before the background goroutine finishes,
// but that's ok.
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
// Already shutdown.
return
}
// Make further accesses error out.
j.blockJournal = nil
j.mdJournal = nil
}
// disable prevents new operations from hitting the journal. Will
// fail unless the journal is completely empty.
func (j *tlfJournal) disable() (wasEnabled bool, err error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
err = j.checkEnabledLocked()
switch errors.Cause(err).(type) {
case nil:
// Continue.
break
case errTLFJournalDisabled:
// Already disabled.
return false, nil
default:
return false, err
}
blockEntryCount, err := j.blockJournal.length()
if err != nil {
return false, err
}
mdEntryCount, err := j.mdJournal.length()
if err != nil {
return false, err
}
// You can only disable an empty journal.
if blockEntryCount > 0 || mdEntryCount > 0 {
return false, errors.WithStack(errTLFJournalNotEmpty{})
}
j.disabled = true
return true, nil
}
func (j *tlfJournal) enable() error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
err := j.checkEnabledLocked()
switch errors.Cause(err).(type) {
case nil:
// Already enabled.
return nil
case errTLFJournalDisabled:
// Continue.
break
default:
return err
}
j.disabled = false
return nil
}
// All the functions below just do the equivalent blockJournal or
// mdJournal function under j.journalLock.
// getBlockData doesn't take a block context param, unlike the remote
// block server, since we still want to serve blocks even if all local
// references have been deleted (for example, a block that's been
// flushed but is still being served from disk until the next
// successful MD flush). This is safe because the journal doesn't
// support removing references for anything other than a flush (see
// the comment in tlfJournal.removeBlockReferences).
func (j *tlfJournal) getBlockData(id kbfsblock.ID) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return j.blockJournal.getData(id)
}
func (j *tlfJournal) putBlockData(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.blockJournal.putData(ctx, id, context, buf, serverHalf)
if err != nil {
return err
}
j.config.Reporter().NotifySyncStatus(ctx, &keybase1.FSPathSyncStatus{
PublicTopLevelFolder: j.tlfID.IsPublic(),
// Path: TODO,
// TODO: should this be the complete total for the file/directory,
// rather than the diff?
SyncingBytes: int64(len(buf)),
// SyncingOps: TODO,
})
j.signalWork()
return nil
}
func (j *tlfJournal) addBlockReference(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.blockJournal.addReference(ctx, id, context)
if err != nil {
return err
}
j.signalWork()
return nil
}
func (j *tlfJournal) removeBlockReferences(
ctx context.Context, contexts kbfsblock.ContextMap) (
liveCounts map[kbfsblock.ID]int, err error) {
// Currently the block journal will still serve block data even if
// all journal references to a block have been removed (i.e.,
// because they have all been flushed to the remote server). If
// we ever need to support the `BlockServer.RemoveReferences` call
// in the journal, we might need to change the journal so that it
// marks blocks as flushed-but-still-readable, so that we can
	// distinguish them from blocks that have had all their references
	// removed and shouldn't be served anymore. For now, just fail
// this call to make sure no uses of it creep in.
return nil, errors.Errorf(
"Removing block references is currently unsupported in the journal")
}
func (j *tlfJournal) archiveBlockReferences(
ctx context.Context, contexts kbfsblock.ContextMap) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.blockJournal.archiveReferences(ctx, contexts)
if err != nil {
return err
}
j.signalWork()
return nil
}
func (j *tlfJournal) isBlockUnflushed(id kbfsblock.ID) (bool, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
return j.blockJournal.isUnflushed(id)
}
func (j *tlfJournal) getBranchID() (BranchID, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return NullBranchID, err
}
return j.mdJournal.branchID, nil
}
func (j *tlfJournal) getMDHead(
ctx context.Context, bid BranchID) (ImmutableBareRootMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return ImmutableBareRootMetadata{}, err
}
return j.mdJournal.getHead(bid)
}
func (j *tlfJournal) getMDRange(
ctx context.Context, bid BranchID, start, stop MetadataRevision) (
[]ImmutableBareRootMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return nil, err
}
return j.mdJournal.getRange(bid, start, stop)
}
func (j *tlfJournal) doPutMD(ctx context.Context, rmd *RootMetadata,
mdInfo unflushedPathMDInfo,
perRevMap unflushedPathsPerRevMap) (
mdID MdID, retryPut bool, err error) {
// Now take the lock and put the MD, merging in the unflushed
// paths while under the lock.
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, false, err
}
if !j.unflushedPaths.appendToCache(mdInfo, perRevMap) {
return MdID{}, true, nil
}
// TODO: remove the revision from the cache on any errors below?
// Tricky when the append is only queued.
mdID, err = j.mdJournal.put(ctx, j.config.Crypto(),
j.config.encryptionKeyGetter(), j.config.BlockSplitter(),
rmd, false)
if err != nil {
return MdID{}, false, err
}
err = j.blockJournal.markMDRevision(ctx, rmd.Revision())
if err != nil {
return MdID{}, false, err
}
j.signalWork()
return mdID, false, nil
}
// prepAndAddRMDWithRetry prepares the paths without holding the lock,
// as `f` might need to take the lock. This is a no-op if the
// unflushed path cache is uninitialized. TODO: avoid doing this if
// we can somehow be sure the cache won't be initialized by the time
// we finish this operation.
func (j *tlfJournal) prepAndAddRMDWithRetry(ctx context.Context,
rmd *RootMetadata,
f func(unflushedPathMDInfo, unflushedPathsPerRevMap) (bool, error)) error {
mdInfo := unflushedPathMDInfo{
revision: rmd.Revision(),
kmd: rmd,
pmd: *rmd.Data(),
// TODO: Plumb through clock? Though the timestamp doesn't
// matter for the unflushed path cache.
localTimestamp: time.Now(),
}
perRevMap, err := j.unflushedPaths.prepUnflushedPaths(
ctx, j.uid, j.key, j.config.Codec(), j.log, mdInfo)
if err != nil {
return err
}
retry, err := f(mdInfo, perRevMap)
if err != nil {
return err
}
if retry {
// The cache was initialized after the last time we tried to
// prepare the unflushed paths.
perRevMap, err = j.unflushedPaths.prepUnflushedPaths(
ctx, j.uid, j.key, j.config.Codec(), j.log, mdInfo)
if err != nil {
return err
}
retry, err := f(mdInfo, perRevMap)
if err != nil {
return err
}
if retry {
return errors.New("Unexpectedly asked to retry " +
"MD put more than once")
}
}
return nil
}
func (j *tlfJournal) putMD(ctx context.Context, rmd *RootMetadata) (
MdID, error) {
var mdID MdID
err := j.prepAndAddRMDWithRetry(ctx, rmd,
func(mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) (
retry bool, err error) {
mdID, retry, err = j.doPutMD(ctx, rmd, mdInfo, perRevMap)
return retry, err
})
if err != nil {
return MdID{}, err
}
return mdID, nil
}
func (j *tlfJournal) clearMDs(ctx context.Context, bid BranchID) error {
if j.onBranchChange != nil {
j.onBranchChange.onTLFBranchChange(j.tlfID, NullBranchID)
}
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.mdJournal.clear(ctx, bid)
if err != nil {
return err
}
j.resumeBackgroundWork()
return nil
}
func (j *tlfJournal) doResolveBranch(ctx context.Context,
bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
extra ExtraMetadata, mdInfo unflushedPathMDInfo,
perRevMap unflushedPathsPerRevMap) (mdID MdID, retry bool, err error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, false, err
}
// The set of unflushed paths could change as part of the
// resolution, and the revision numbers definitely change.
isPendingLocalSquash := bid == PendingLocalSquashBranchID
if !j.unflushedPaths.reinitializeWithResolution(
mdInfo, perRevMap, isPendingLocalSquash) {
return MdID{}, true, nil
}
// First write the resolution to a new branch, and swap it with
// the existing branch, then clear the existing branch.
mdID, err = j.mdJournal.resolveAndClear(
ctx, j.config.Crypto(), j.config.encryptionKeyGetter(),
j.config.BlockSplitter(), j.config.MDCache(), bid, rmd)
if err != nil {
return MdID{}, false, err
}
// Then go through and mark blocks and md rev markers for ignoring.
err = j.blockJournal.ignoreBlocksAndMDRevMarkers(ctx, blocksToDelete)
if err != nil {
return MdID{}, false, err
}
err = j.blockJournal.saveBlocksUntilNextMDFlush()
if err != nil {
return MdID{}, false, err
}
// Finally, append a new, non-ignored md rev marker for the new revision.
err = j.blockJournal.markMDRevision(ctx, rmd.Revision())
if err != nil {
return MdID{}, false, err
}
j.resumeBackgroundWork()
j.signalWork()
// TODO: kick off a background goroutine that deletes ignored
// block data files before the flush gets to them.
return mdID, false, nil
}
func (j *tlfJournal) resolveBranch(ctx context.Context,
bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
extra ExtraMetadata) (MdID, error) {
var mdID MdID
err := j.prepAndAddRMDWithRetry(ctx, rmd,
func(mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) (
retry bool, err error) {
mdID, retry, err = j.doResolveBranch(
ctx, bid, blocksToDelete, rmd, extra, mdInfo, perRevMap)
return retry, err
})
if err != nil {
return MdID{}, err
}
return mdID, nil
}
func (j *tlfJournal) wait(ctx context.Context) error {
workLeft, err := j.wg.WaitUnlessPaused(ctx)
if err != nil {
return err
}
if workLeft {
j.log.CDebugf(ctx, "Wait completed with work left, "+
"due to paused journal")
}
return nil
}
| 1 | 15,192 | `FromCommand` seems to fit better, since with my suggestion below, all pauses will raise a signal on `needPause`. Also maybe the format `journalPauseConflict` and `journalPauseCommand` is better, since the journal isn't necessarily paused yet once we raise a pause signal. | keybase-kbfs | go |
@@ -236,7 +236,7 @@ func IsServiceNonRetryableError(err error) bool {
// IsServiceNonRetryableErrorGRPC checks if the error is a non retryable error.
func IsServiceNonRetryableErrorGRPC(err error) bool {
if err == context.DeadlineExceeded {
- return true
+ return false
}
if st, ok := status.FromError(err); ok { | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package common
import (
"fmt"
"math/rand"
"sort"
"strings"
"sync"
"time"
"github.com/dgryski/go-farm"
"github.com/gogo/protobuf/proto"
"github.com/gogo/status"
commonproto "go.temporal.io/temporal-proto/common"
"go.temporal.io/temporal-proto/enums"
"go.uber.org/yarpc/yarpcerrors"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
h "github.com/temporalio/temporal/.gen/go/history"
m "github.com/temporalio/temporal/.gen/go/matching"
workflow "github.com/temporalio/temporal/.gen/go/shared"
"github.com/temporalio/temporal/common/backoff"
"github.com/temporalio/temporal/common/log"
"github.com/temporalio/temporal/common/log/tag"
"github.com/temporalio/temporal/common/metrics"
)
const (
golandMapReserverNumberOfBytes = 48
retryPersistenceOperationInitialInterval = 50 * time.Millisecond
retryPersistenceOperationMaxInterval = 10 * time.Second
retryPersistenceOperationExpirationInterval = 30 * time.Second
historyServiceOperationInitialInterval = 50 * time.Millisecond
historyServiceOperationMaxInterval = 10 * time.Second
historyServiceOperationExpirationInterval = 30 * time.Second
matchingServiceOperationInitialInterval = 1000 * time.Millisecond
matchingServiceOperationMaxInterval = 10 * time.Second
matchingServiceOperationExpirationInterval = 30 * time.Second
frontendServiceOperationInitialInterval = 200 * time.Millisecond
frontendServiceOperationMaxInterval = 5 * time.Second
frontendServiceOperationExpirationInterval = 15 * time.Second
adminServiceOperationInitialInterval = 200 * time.Millisecond
adminServiceOperationMaxInterval = 5 * time.Second
adminServiceOperationExpirationInterval = 15 * time.Second
retryKafkaOperationInitialInterval = 50 * time.Millisecond
retryKafkaOperationMaxInterval = 10 * time.Second
retryKafkaOperationExpirationInterval = 30 * time.Second
contextExpireThreshold = 10 * time.Millisecond
// FailureReasonCompleteResultExceedsLimit is failureReason for complete result exceeds limit
FailureReasonCompleteResultExceedsLimit = "COMPLETE_RESULT_EXCEEDS_LIMIT"
// FailureReasonFailureDetailsExceedsLimit is failureReason for failure details exceeds limit
FailureReasonFailureDetailsExceedsLimit = "FAILURE_DETAILS_EXCEEDS_LIMIT"
// FailureReasonCancelDetailsExceedsLimit is failureReason for cancel details exceeds limit
FailureReasonCancelDetailsExceedsLimit = "CANCEL_DETAILS_EXCEEDS_LIMIT"
// FailureReasonHeartbeatExceedsLimit is failureReason for heartbeat exceeds limit
FailureReasonHeartbeatExceedsLimit = "HEARTBEAT_EXCEEDS_LIMIT"
// FailureReasonDecisionBlobSizeExceedsLimit is the failureReason for decision blob exceeds size limit
FailureReasonDecisionBlobSizeExceedsLimit = "DECISION_BLOB_SIZE_EXCEEDS_LIMIT"
// FailureReasonSizeExceedsLimit is reason to fail workflow when history size or count exceed limit
FailureReasonSizeExceedsLimit = "HISTORY_EXCEEDS_LIMIT"
// FailureReasonTransactionSizeExceedsLimit is the failureReason for when transaction cannot be committed because it exceeds size limit
FailureReasonTransactionSizeExceedsLimit = "TRANSACTION_SIZE_EXCEEDS_LIMIT"
)
var (
// ErrBlobSizeExceedsLimit is error for event blob size exceeds limit
ErrBlobSizeExceedsLimit = &workflow.BadRequestError{Message: "Blob data size exceeds limit."}
// ErrContextTimeoutTooShort is error for setting a very short context timeout when calling a long poll API
ErrContextTimeoutTooShort = &workflow.BadRequestError{Message: "Context timeout is too short."}
// ErrContextTimeoutNotSet is error for not setting a context timeout when calling a long poll API
ErrContextTimeoutNotSet = &workflow.BadRequestError{Message: "Context timeout is not set."}
// StContextTimeoutTooShort is error for setting a very short context timeout when calling a long poll API
StContextTimeoutTooShort = status.New(codes.InvalidArgument, "Context timeout is too short.")
// StContextTimeoutNotSet is error for not setting a context timeout when calling a long poll API
StContextTimeoutNotSet = status.New(codes.InvalidArgument, "Context timeout is not set.")
)
// AwaitWaitGroup calls Wait on the given wait group
// Returns true if the Wait() call succeeded before the timeout
// Returns false if the Wait() did not return before the timeout
func AwaitWaitGroup(wg *sync.WaitGroup, timeout time.Duration) bool {
doneC := make(chan struct{})
go func() {
wg.Wait()
close(doneC)
}()
select {
case <-doneC:
return true
case <-time.After(timeout):
return false
}
}
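// exampleAwaitWaitGroupUsage is an editor-added illustrative sketch (not part
// of the original file) showing the intended use of AwaitWaitGroup: bound how
// long we block on a WaitGroup and report whether the workers finished.
func exampleAwaitWaitGroupUsage() bool {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // stand-in for real shutdown work
	}()
	// true if the worker finished within the timeout, false otherwise
	return AwaitWaitGroup(&wg, 10*time.Second)
}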
// AddSecondsToBaseTime returns the UnixNano timestamp of the base time plus the given number of seconds.
func AddSecondsToBaseTime(baseTimeInNanoSec int64, durationInSeconds int64) int64 {
timeOut := time.Duration(durationInSeconds) * time.Second
return time.Unix(0, baseTimeInNanoSec).Add(timeOut).UnixNano()
}
// CreatePersistanceRetryPolicy creates a retry policy for persistence layer operations
func CreatePersistanceRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(retryPersistenceOperationInitialInterval)
policy.SetMaximumInterval(retryPersistenceOperationMaxInterval)
policy.SetExpirationInterval(retryPersistenceOperationExpirationInterval)
return policy
}
// CreateHistoryServiceRetryPolicy creates a retry policy for calls to history service
func CreateHistoryServiceRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(historyServiceOperationInitialInterval)
policy.SetMaximumInterval(historyServiceOperationMaxInterval)
policy.SetExpirationInterval(historyServiceOperationExpirationInterval)
return policy
}
// CreateMatchingServiceRetryPolicy creates a retry policy for calls to matching service
func CreateMatchingServiceRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(matchingServiceOperationInitialInterval)
policy.SetMaximumInterval(matchingServiceOperationMaxInterval)
policy.SetExpirationInterval(matchingServiceOperationExpirationInterval)
return policy
}
// CreateFrontendServiceRetryPolicy creates a retry policy for calls to frontend service
func CreateFrontendServiceRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(frontendServiceOperationInitialInterval)
policy.SetMaximumInterval(frontendServiceOperationMaxInterval)
policy.SetExpirationInterval(frontendServiceOperationExpirationInterval)
return policy
}
// CreateAdminServiceRetryPolicy creates a retry policy for calls to admin service
func CreateAdminServiceRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(adminServiceOperationInitialInterval)
policy.SetMaximumInterval(adminServiceOperationMaxInterval)
policy.SetExpirationInterval(adminServiceOperationExpirationInterval)
return policy
}
// CreateKafkaOperationRetryPolicy creates a retry policy for kafka operation
func CreateKafkaOperationRetryPolicy() backoff.RetryPolicy {
policy := backoff.NewExponentialRetryPolicy(retryKafkaOperationInitialInterval)
policy.SetMaximumInterval(retryKafkaOperationMaxInterval)
policy.SetExpirationInterval(retryKafkaOperationExpirationInterval)
return policy
}
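// Editor's note (illustrative, not part of the original file): these policies
// are meant to be handed to a retry helper together with a retryability
// predicate such as IsPersistenceTransientError below. Assuming the backoff
// package exposes a Retry(operation, policy, isRetryable) helper (an
// assumption about its API, not confirmed here), a persistence call would be
// wrapped roughly like:
//
//	op := func() error { return persistenceClient.DoSomething(ctx) } // hypothetical client call
//	err := backoff.Retry(op, CreatePersistanceRetryPolicy(), IsPersistenceTransientError)
//
// so transient errors are retried with exponential backoff while
// non-retryable ones fail immediately.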
// IsPersistenceTransientError checks if the error is a transient persistence error
func IsPersistenceTransientError(err error) bool {
switch err.(type) {
case *workflow.InternalServiceError, *workflow.ServiceBusyError:
return true
}
return false
}
// IsKafkaTransientError checks if the error is a transient kafka error
func IsKafkaTransientError(err error) bool {
return true
}
// IsServiceTransientError checks if the error is a retryable error.
func IsServiceTransientError(err error) bool {
return !IsServiceNonRetryableError(err)
}
// IsServiceTransientErrorGRPC checks if the error is a retryable error.
func IsServiceTransientErrorGRPC(err error) bool {
return !IsServiceNonRetryableErrorGRPC(err)
}
// IsServiceNonRetryableError checks if the error is a non retryable error.
func IsServiceNonRetryableError(err error) bool {
switch err := err.(type) {
case *workflow.EntityNotExistsError:
return true
case *workflow.BadRequestError:
return true
case *workflow.DomainNotActiveError:
return true
case *workflow.WorkflowExecutionAlreadyStartedError:
return true
case *workflow.CancellationAlreadyRequestedError:
return true
case *yarpcerrors.Status:
if err.Code() != yarpcerrors.CodeDeadlineExceeded {
return true
}
return false
}
return false
}
// IsServiceNonRetryableErrorGRPC checks if the error is a non retryable error.
func IsServiceNonRetryableErrorGRPC(err error) bool {
if err == context.DeadlineExceeded {
return true
}
if st, ok := status.FromError(err); ok {
if st.Code() == codes.NotFound ||
st.Code() == codes.InvalidArgument ||
st.Code() == codes.AlreadyExists {
return true
}
}
return false
}
// IsWhitelistServiceTransientError checks if the error is a transient error.
func IsWhitelistServiceTransientError(err error) bool {
if err == context.DeadlineExceeded {
return true
}
switch err.(type) {
case *workflow.InternalServiceError:
return true
case *workflow.ServiceBusyError:
return true
case *workflow.LimitExceededError:
return true
case *h.ShardOwnershipLostError:
return true
case *yarpcerrors.Status:
		// We only selectively retry the following yarpc errors, which the client can safely retry with a backoff
if yarpcerrors.IsDeadlineExceeded(err) ||
yarpcerrors.IsUnavailable(err) ||
yarpcerrors.IsUnknown(err) ||
yarpcerrors.IsInternal(err) {
return true
}
return false
}
return false
}
// IsWhitelistServiceTransientErrorGRPC checks if the error is a transient error.
func IsWhitelistServiceTransientErrorGRPC(err error) bool {
// TODO: wrong context package
if err == context.DeadlineExceeded {
return true
}
if st, ok := status.FromError(err); ok {
if st.Code() == codes.Internal ||
st.Code() == codes.ResourceExhausted ||
st.Code() == codes.Unavailable ||
st.Code() == codes.Unknown ||
st.Code() == codes.DeadlineExceeded {
// TODO: add *h.ShardOwnershipLostError handle here
return true
}
}
return false
}
// WorkflowIDToHistoryShard is used to map workflowID to a shardID
func WorkflowIDToHistoryShard(workflowID string, numberOfShards int) int {
hash := farm.Fingerprint32([]byte(workflowID))
return int(hash % uint32(numberOfShards))
}
// PrettyPrintHistory prints history in human readable format
func PrettyPrintHistory(history *commonproto.History, logger log.Logger) {
fmt.Println("******************************************")
fmt.Println("History", proto.MarshalTextString(history))
fmt.Println("******************************************")
}
// IsValidContext checks that the thrift context is not expired or cancelled.
// Returns nil if the context is still valid. Otherwise, returns the result of
// ctx.Err()
func IsValidContext(ctx context.Context) error {
ch := ctx.Done()
if ch != nil {
select {
case <-ch:
return ctx.Err()
default:
return nil
}
}
deadline, ok := ctx.Deadline()
if ok && time.Until(deadline) < contextExpireThreshold {
return context.DeadlineExceeded
}
return nil
}
// GenerateRandomString is used for generate test string
func GenerateRandomString(n int) string {
rand.Seed(time.Now().UnixNano())
letterRunes := []rune("random")
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
// CreateMatchingPollForDecisionTaskResponse create response for matching's PollForDecisionTask
func CreateMatchingPollForDecisionTaskResponse(historyResponse *h.RecordDecisionTaskStartedResponse, workflowExecution *workflow.WorkflowExecution, token []byte) *m.PollForDecisionTaskResponse {
matchingResp := &m.PollForDecisionTaskResponse{
WorkflowExecution: workflowExecution,
TaskToken: token,
Attempt: Int64Ptr(historyResponse.GetAttempt()),
WorkflowType: historyResponse.WorkflowType,
StartedEventId: historyResponse.StartedEventId,
StickyExecutionEnabled: historyResponse.StickyExecutionEnabled,
NextEventId: historyResponse.NextEventId,
DecisionInfo: historyResponse.DecisionInfo,
WorkflowExecutionTaskList: historyResponse.WorkflowExecutionTaskList,
BranchToken: historyResponse.BranchToken,
ScheduledTimestamp: historyResponse.ScheduledTimestamp,
StartedTimestamp: historyResponse.StartedTimestamp,
Queries: historyResponse.Queries,
}
if historyResponse.GetPreviousStartedEventId() != EmptyEventID {
matchingResp.PreviousStartedEventId = historyResponse.PreviousStartedEventId
}
return matchingResp
}
// MinInt64 returns the smaller of two given int64
func MinInt64(a, b int64) int64 {
if a < b {
return a
}
return b
}
// MaxInt64 returns the greater of two given int64
func MaxInt64(a, b int64) int64 {
if a > b {
return a
}
return b
}
// MinInt32 returns the smaller of two given int32
func MinInt32(a, b int32) int32 {
if a < b {
return a
}
return b
}
// MinInt returns the smaller of two given integers
func MinInt(a, b int) int {
if a < b {
return a
}
return b
}
// MaxInt returns the greater one of two given integers
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
}
// SortInt64Slice sorts the given int64 slice.
// Sort is not guaranteed to be stable.
func SortInt64Slice(slice []int64) {
sort.Slice(slice, func(i int, j int) bool {
return slice[i] < slice[j]
})
}
// ValidateRetryPolicy validates a retry policy
func ValidateRetryPolicy(policy *workflow.RetryPolicy) error {
if policy == nil {
// nil policy is valid which means no retry
return nil
}
if policy.GetInitialIntervalInSeconds() <= 0 {
return &workflow.BadRequestError{Message: "InitialIntervalInSeconds must be greater than 0 on retry policy."}
}
if policy.GetBackoffCoefficient() < 1 {
return &workflow.BadRequestError{Message: "BackoffCoefficient cannot be less than 1 on retry policy."}
}
if policy.GetMaximumIntervalInSeconds() < 0 {
return &workflow.BadRequestError{Message: "MaximumIntervalInSeconds cannot be less than 0 on retry policy."}
}
if policy.GetMaximumIntervalInSeconds() > 0 && policy.GetMaximumIntervalInSeconds() < policy.GetInitialIntervalInSeconds() {
return &workflow.BadRequestError{Message: "MaximumIntervalInSeconds cannot be less than InitialIntervalInSeconds on retry policy."}
}
if policy.GetMaximumAttempts() < 0 {
return &workflow.BadRequestError{Message: "MaximumAttempts cannot be less than 0 on retry policy."}
}
if policy.GetExpirationIntervalInSeconds() < 0 {
return &workflow.BadRequestError{Message: "ExpirationIntervalInSeconds cannot be less than 0 on retry policy."}
}
if policy.GetMaximumAttempts() == 0 && policy.GetExpirationIntervalInSeconds() == 0 {
return &workflow.BadRequestError{Message: "MaximumAttempts and ExpirationIntervalInSeconds are both 0. At least one of them must be specified."}
}
return nil
}
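// exampleValidRetryPolicy is an editor-added illustrative sketch (not part of
// the original file). It assumes the generated workflow.RetryPolicy struct
// exposes the pointer fields implied by the getters used above. The values
// below satisfy every check in ValidateRetryPolicy, while a policy with both
// MaximumAttempts and ExpirationIntervalInSeconds set to 0 would be rejected.
func exampleValidRetryPolicy() error {
	initialInterval := int32(1)
	backoffCoefficient := 2.0
	maxInterval := int32(60)
	maxAttempts := int32(5)
	expiration := int32(300)
	return ValidateRetryPolicy(&workflow.RetryPolicy{
		InitialIntervalInSeconds:    &initialInterval,
		BackoffCoefficient:          &backoffCoefficient,
		MaximumIntervalInSeconds:    &maxInterval,
		MaximumAttempts:             &maxAttempts,
		ExpirationIntervalInSeconds: &expiration,
	})
}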
// CreateHistoryStartWorkflowRequest creates a start workflow request for history
func CreateHistoryStartWorkflowRequest(
domainID string,
startRequest *workflow.StartWorkflowExecutionRequest,
) *h.StartWorkflowExecutionRequest {
now := time.Now()
histRequest := &h.StartWorkflowExecutionRequest{
DomainUUID: StringPtr(domainID),
StartRequest: startRequest,
}
if startRequest.RetryPolicy != nil && startRequest.RetryPolicy.GetExpirationIntervalInSeconds() > 0 {
expirationInSeconds := startRequest.RetryPolicy.GetExpirationIntervalInSeconds()
deadline := now.Add(time.Second * time.Duration(expirationInSeconds))
histRequest.ExpirationTimestamp = Int64Ptr(deadline.Round(time.Millisecond).UnixNano())
}
histRequest.FirstDecisionTaskBackoffSeconds = Int32Ptr(backoff.GetBackoffForNextScheduleInSeconds(startRequest.GetCronSchedule(), now, now))
return histRequest
}
// CheckEventBlobSizeLimit checks if blob data exceeds limits. It logs a warning if it exceeds warnLimit,
// and returns ErrBlobSizeExceedsLimit if it exceeds errorLimit.
func CheckEventBlobSizeLimit(actualSize, warnLimit, errorLimit int, domainID, workflowID, runID string, scope metrics.Scope, logger log.Logger) error {
scope.RecordTimer(metrics.EventBlobSize, time.Duration(actualSize))
if actualSize > warnLimit {
if logger != nil {
logger.Warn("Blob size exceeds limit.",
tag.WorkflowDomainID(domainID), tag.WorkflowID(workflowID), tag.WorkflowRunID(runID), tag.WorkflowSize(int64(actualSize)))
}
if actualSize > errorLimit {
return ErrBlobSizeExceedsLimit
}
}
return nil
}
// ValidateLongPollContextTimeout checks if the context timeout for a long poll handler is too short or below a normal value.
// If the timeout is not set or too short, it logs an error, and return ErrContextTimeoutNotSet or ErrContextTimeoutTooShort
// accordingly. If the timeout is only below a normal value, it just logs an info and return nil.
func ValidateLongPollContextTimeout(
ctx context.Context,
handlerName string,
logger log.Logger,
) error {
deadline, err := ValidateLongPollContextTimeoutIsSet(ctx, handlerName, logger)
if err != nil {
return err
}
timeout := time.Until(deadline)
if timeout < MinLongPollTimeout {
err := ErrContextTimeoutTooShort
logger.Error("Context timeout is too short for long poll API.",
tag.WorkflowHandlerName(handlerName), tag.Error(err), tag.WorkflowPollContextTimeout(timeout))
return err
}
if timeout < CriticalLongPollTimeout {
logger.Warn("Context timeout is lower than critical value for long poll API.",
tag.WorkflowHandlerName(handlerName), tag.WorkflowPollContextTimeout(timeout))
}
return nil
}
// ValidateLongPollContextTimeoutIsSet checks if the context timeout is set for long poll requests.
func ValidateLongPollContextTimeoutIsSet(
ctx context.Context,
handlerName string,
logger log.Logger,
) (time.Time, error) {
deadline, ok := ctx.Deadline()
if !ok {
err := ErrContextTimeoutNotSet
logger.Error("Context timeout not set for long poll API.",
tag.WorkflowHandlerName(handlerName), tag.Error(err))
return deadline, err
}
return deadline, nil
}
// GetSizeOfMapStringToByteArray get size of map[string][]byte
func GetSizeOfMapStringToByteArray(input map[string][]byte) int {
if input == nil {
return 0
}
res := 0
for k, v := range input {
res += len(k) + len(v)
}
return res + golandMapReserverNumberOfBytes
}
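// exampleMapSize is an editor-added illustrative sketch (not part of the
// original file): for a map with a single 3-byte key and 5-byte value the
// function above returns 3 + 5 + 48 = 56, where the constant 48 approximates
// the fixed per-map overhead.
func exampleMapSize() int {
	m := map[string][]byte{"key": []byte("value")}
	return GetSizeOfMapStringToByteArray(m) // 3 + 5 + 48 = 56
}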
// IsJustOrderByClause returns true if the query starts with order by
func IsJustOrderByClause(clause string) bool {
whereClause := strings.TrimSpace(clause)
whereClause = strings.ToLower(whereClause)
return strings.HasPrefix(whereClause, "order by")
}
// ConvertIndexedValueTypeToThriftType takes fieldType as interface{} and converts it to IndexedValueType,
// because different implementations of the dynamic config client may return different types
func ConvertIndexedValueTypeToThriftType(fieldType interface{}, logger log.Logger) workflow.IndexedValueType {
switch t := fieldType.(type) {
case float64:
return workflow.IndexedValueType(fieldType.(float64))
case int:
return workflow.IndexedValueType(fieldType.(int))
case workflow.IndexedValueType:
return fieldType.(workflow.IndexedValueType)
default:
		// Unknown fieldType; please make sure the dynamic config returns the correct value type
logger.Error("unknown index value type", tag.Value(fieldType), tag.ValueType(t))
		return fieldType.(workflow.IndexedValueType) // it will panic and be captured by the logger
}
}
// ConvertIndexedValueTypeToProtoType takes fieldType as interface{} and converts it to IndexedValueType,
// because different implementations of the dynamic config client may return different types
func ConvertIndexedValueTypeToProtoType(fieldType interface{}, logger log.Logger) enums.IndexedValueType {
switch t := fieldType.(type) {
case float64:
return enums.IndexedValueType(fieldType.(float64))
case int:
return enums.IndexedValueType(fieldType.(int))
case enums.IndexedValueType:
return fieldType.(enums.IndexedValueType)
default:
		// Unknown fieldType; please make sure the dynamic config returns the correct value type
logger.Error("unknown index value type", tag.Value(fieldType), tag.ValueType(t))
		return fieldType.(enums.IndexedValueType) // it will panic and be captured by the logger
}
}
// GetDefaultAdvancedVisibilityWritingMode get default advancedVisibilityWritingMode based on
// whether related config exists in static config file.
func GetDefaultAdvancedVisibilityWritingMode(isAdvancedVisConfigExist bool) string {
if isAdvancedVisConfigExist {
return AdvancedVisibilityWritingModeOn
}
return AdvancedVisibilityWritingModeOff
}
| 1 | 9,259 | This is fix from another PR #120. | temporalio-temporal | go |
@@ -20,7 +20,9 @@ class MultiInterface(Interface):
datatype = 'multitabular'
- subtypes = ['dataframe', 'dictionary', 'array', 'dask']
+ subtypes = ['dictionary', 'dataframe', 'array', 'dask']
+
+ multi = True
@classmethod
def init(cls, eltype, data, kdims, vdims): | 1 | import numpy as np
from ..util import max_range
from .interface import Interface
class MultiInterface(Interface):
"""
MultiInterface allows wrapping around a list of tabular datasets
including dataframes, the columnar dictionary format or 2D tabular
NumPy arrays. Using the split method the list of tabular data can
be split into individual datasets.
    The interface makes a list of tabular datasets appear as a single
    dataset. The length, shape and values methods therefore
make the data appear like a single array of concatenated subpaths,
separated by NaN values.
"""
types = ()
datatype = 'multitabular'
subtypes = ['dataframe', 'dictionary', 'array', 'dask']
@classmethod
def init(cls, eltype, data, kdims, vdims):
new_data = []
dims = {'kdims': eltype.kdims, 'vdims': eltype.vdims}
if kdims is not None:
dims['kdims'] = kdims
if vdims is not None:
dims['vdims'] = vdims
if not isinstance(data, list):
            raise ValueError('MultiInterface data must be a list of tabular data types.')
prev_interface, prev_dims = None, None
for d in data:
d, interface, dims, _ = Interface.initialize(eltype, d, kdims, vdims,
datatype=cls.subtypes)
if prev_interface:
if prev_interface != interface:
raise ValueError('MultiInterface subpaths must all have matching datatype.')
if dims['kdims'] != prev_dims['kdims']:
raise ValueError('MultiInterface subpaths must all have matching kdims.')
if dims['vdims'] != prev_dims['vdims']:
raise ValueError('MultiInterface subpaths must all have matching vdims.')
new_data.append(d)
prev_interface, prev_dims = interface, dims
return new_data, dims, {}
@classmethod
def validate(cls, dataset):
        # Ensure that auxiliary key dimensions on each subpath are scalar
if dataset.ndims <= 2:
return
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
for dim in dataset.kdims[2:]:
if len(ds.dimension_values(dim, expanded=False)) > 1:
raise ValueError("'%s' key dimension value must have a constant value on each subpath, "
"for paths with value for each coordinate of the array declare a "
"value dimension instead." % dim)
@classmethod
def _inner_dataset_template(cls, dataset):
"""
Returns a Dataset template used as a wrapper around the data
contained within the multi-interface dataset.
"""
from . import Dataset
vdims = dataset.vdims if getattr(dataset, 'level', None) is None else []
return Dataset(dataset.data[0], datatype=cls.subtypes,
kdims=dataset.kdims, vdims=vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if not dataset.data:
# Note: Required to make empty datasets work at all (should fix)
# Other interfaces declare equivalent of empty array
# which defaults to float type
return float
ds = cls._inner_dataset_template(dataset)
return ds.interface.dimension_type(ds, dim)
@classmethod
def range(cls, dataset, dim):
if not dataset.data:
return (None, None)
ranges = []
ds = cls._inner_dataset_template(dataset)
# Backward compatibility for Contours/Polygons level
level = getattr(dataset, 'level', None)
dim = dataset.get_dimension(dim)
if level is not None and dim is dataset.vdims[0]:
return (level, level)
for d in dataset.data:
ds.data = d
ranges.append(ds.interface.range(ds, dim))
return max_range(ranges)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
"""
        Applies selection to all the subpaths.
"""
ds = cls._inner_dataset_template(dataset)
data = []
for d in dataset.data:
ds.data = d
sel = ds.interface.select(ds, **selection)
data.append(sel)
return data
@classmethod
def select_paths(cls, dataset, selection):
"""
Allows selecting paths with usual NumPy slicing index.
"""
return [s[0] for s in np.array([{0: p} for p in dataset.data])[selection]]
@classmethod
def aggregate(cls, columns, dimensions, function, **kwargs):
raise NotImplementedError('Aggregation currently not implemented')
@classmethod
def groupby(cls, columns, dimensions, container_type, group_type, **kwargs):
raise NotImplementedError('Grouping currently not implemented')
@classmethod
def sample(cls, columns, samples=[]):
raise NotImplementedError('Sampling operation on subpaths not supported')
@classmethod
def shape(cls, dataset):
"""
Returns the shape of all subpaths, making it appear like a
single array of concatenated subpaths separated by NaN values.
"""
if not dataset.data:
return (0, len(dataset.dimensions()))
rows, cols = 0, 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
r, cols = ds.interface.shape(ds)
rows += r
return rows+len(dataset.data)-1, cols
@classmethod
def length(cls, dataset):
"""
Returns the length of the multi-tabular dataset making it appear
like a single array of concatenated subpaths separated by NaN
values.
"""
if not dataset.data:
return 0
length = 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
length += ds.interface.length(ds)
return length+len(dataset.data)-1
@classmethod
def nonzero(cls, dataset):
return bool(cls.length(dataset))
@classmethod
def redim(cls, dataset, dimensions):
if not dataset.data:
return dataset.data
new_data = []
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
new_data.append(ds.interface.redim(ds, dimensions))
return new_data
@classmethod
def values(cls, dataset, dimension, expanded, flat):
"""
Returns a single concatenated array of all subpaths separated
by NaN values. If expanded keyword is False an array of arrays
is returned.
"""
if not dataset.data:
return np.array([])
values = []
ds = cls._inner_dataset_template(dataset)
didx = dataset.get_dimension_index(dimension)
for d in dataset.data:
ds.data = d
expand = expanded if didx>1 and dimension in dataset.kdims else True
dvals = ds.interface.values(ds, dimension, expand, flat)
values.append(dvals)
if expanded:
values.append([np.NaN])
elif not expand and len(dvals):
values[-1] = dvals[0]
if not values:
            return np.array([])
elif expanded:
return np.concatenate(values[:-1])
else:
return np.array(values)
@classmethod
def split(cls, dataset, start, end):
"""
Splits a multi-interface Dataset into regular Datasets using
regular tabular interfaces.
"""
objs = []
for d in dataset.data[start: end]:
objs.append(dataset.clone(d, datatype=cls.subtypes))
return objs
Interface.register(MultiInterface)
| 1 | 19,000 | I *think* it makes sense to try the more general dictionary (i.e standard python literals) format first. Might be other implications I haven't figured out yet. Then again, ``MultiInterface`` is pretty new so it probably doesn't matter wrt backwards compatibility. | holoviz-holoviews | py |
@@ -32,9 +32,13 @@ class FixMediaContextCommand extends ContainerAwareCommand
*/
public function execute(InputInterface $input, OutputInterface $output)
{
+ if (!$this->getContainer()->has('sonata.media.manager.category')) {
+ throw new \LogicException('The classification feature is disabled.');
+ }
+
$pool = $this->getContainer()->get('sonata.media.pool');
$contextManager = $this->getContainer()->get('sonata.classification.manager.context');
- $cateoryManager = $this->getContainer()->get('sonata.classification.manager.category');
+ $categoryManager = $this->getContainer()->get('sonata.media.manager.category');
foreach ($pool->getContexts() as $context => $contextAttrs) {
/** @var ContextInterface $defaultContext */ | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <thomas.rabaix@sonata-project.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Command;
use Sonata\ClassificationBundle\Model\ContextInterface;
use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
class FixMediaContextCommand extends ContainerAwareCommand
{
/**
* {@inheritdoc}
*/
public function configure()
{
$this->setName('sonata:media:fix-media-context');
$this->setDescription('Generate the default category for each media context');
}
/**
* {@inheritdoc}
*/
public function execute(InputInterface $input, OutputInterface $output)
{
$pool = $this->getContainer()->get('sonata.media.pool');
$contextManager = $this->getContainer()->get('sonata.classification.manager.context');
$cateoryManager = $this->getContainer()->get('sonata.classification.manager.category');
foreach ($pool->getContexts() as $context => $contextAttrs) {
/** @var ContextInterface $defaultContext */
$defaultContext = $contextManager->findOneBy(array(
'id' => $context,
));
if (!$defaultContext) {
$output->writeln(sprintf(" > default context for '%s' is missing, creating one", $context));
$defaultContext = $contextManager->create();
$defaultContext->setId($context);
$defaultContext->setName(ucfirst($context));
$defaultContext->setEnabled(true);
$contextManager->save($defaultContext);
}
$defaultCategory = $cateoryManager->getRootCategory($defaultContext);
if (!$defaultCategory) {
$output->writeln(sprintf(" > default category for '%s' is missing, creating one", $context));
$defaultCategory = $cateoryManager->create();
$defaultCategory->setContext($defaultContext);
$defaultCategory->setName(ucfirst($context));
$defaultCategory->setEnabled(true);
$defaultCategory->setPosition(0);
$cateoryManager->save($defaultCategory);
}
}
$output->writeln('Done!');
}
}
| 1 | 9,179 | lol that variable name | sonata-project-SonataMediaBundle | php |
@@ -31,8 +31,8 @@ const (
ChainFilterForward = ChainNamePrefix + "-FORWARD"
ChainFilterOutput = ChainNamePrefix + "-OUTPUT"
- ChainFailsafeIn = ChainNamePrefix + "-FAILSAFE-IN"
- ChainFailsafeOut = ChainNamePrefix + "-FAILSAFE-OUT"
+ ChainFailsafeIn = ChainNamePrefix + "-failsafe-in"
+ ChainFailsafeOut = ChainNamePrefix + "-failsafe-out"
ChainNATPrerouting = ChainNamePrefix + "-PREROUTING"
ChainNATPostrouting = ChainNamePrefix + "-POSTROUTING" | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/Sirupsen/logrus"
"github.com/projectcalico/felix/go/felix/ipsets"
"github.com/projectcalico/felix/go/felix/iptables"
"github.com/projectcalico/felix/go/felix/proto"
"net"
"strings"
)
const (
ChainNamePrefix = "cali"
IPSetNamePrefix = "cali"
ChainFilterInput = ChainNamePrefix + "-INPUT"
ChainFilterForward = ChainNamePrefix + "-FORWARD"
ChainFilterOutput = ChainNamePrefix + "-OUTPUT"
ChainFailsafeIn = ChainNamePrefix + "-FAILSAFE-IN"
ChainFailsafeOut = ChainNamePrefix + "-FAILSAFE-OUT"
ChainNATPrerouting = ChainNamePrefix + "-PREROUTING"
ChainNATPostrouting = ChainNamePrefix + "-POSTROUTING"
ChainNATOutgoing = ChainNamePrefix + "-nat-outgoing"
IPSetIDNATOutgoingAllPools = "all-ipam-pools"
IPSetIDNATOutgoingMasqPools = "masq-ipam-pools"
IPSetIDAllHostIPs = "all-hosts"
PolicyInboundPfx = ChainNamePrefix + "pi-"
PolicyOutboundPfx = ChainNamePrefix + "po-"
ChainWorkloadToHost = ChainNamePrefix + "-wl-to-host"
ChainFromWorkloadDispatch = ChainNamePrefix + "-from-wl-dispatch"
ChainToWorkloadDispatch = ChainNamePrefix + "-to-wl-dispatch"
ChainDispatchToHostEndpoint = ChainNamePrefix + "-to-host-endpoint"
ChainDispatchFromHostEndpoint = ChainNamePrefix + "-from-host-endpoint"
WorkloadToEndpointPfx = ChainNamePrefix + "tw-"
WorkloadFromEndpointPfx = ChainNamePrefix + "fw-"
HostToEndpointPfx = ChainNamePrefix + "th-"
HostFromEndpointPfx = ChainNamePrefix + "fh-"
RuleHashPrefix = "cali:"
	// HistoricInsertedNATRuleRegex is a regex pattern used to match
// special-case rules inserted by old versions of felix. Specifically,
// Python felix used to insert a masquerade rule directly into the
// POSTROUTING chain.
//
// Note: this regex depends on the output format of iptables-save so,
// where possible, it's best to match only on part of the rule that
// we're sure can't change (such as the ipset name in the masquerade
// rule).
HistoricInsertedNATRuleRegex = `-A POSTROUTING .* felix-masq-ipam-pools .*|` +
`-A POSTROUTING -o tunl0 -m addrtype ! --src-type LOCAL --limit-iface-out -m addrtype --src-type LOCAL -j MASQUERADE`
)
var (
// AllHistoricChainNamePrefixes lists all the prefixes that we've used for chains. Keeping
// track of the old names lets us clean them up.
AllHistoricChainNamePrefixes = []string{"felix-", "cali"}
// AllHistoricIPSetNamePrefixes, similarly contains all the prefixes we've ever used for IP
// sets.
AllHistoricIPSetNamePrefixes = []string{"felix-", "cali"}
// LegacyV4IPSetNames contains some extra IP set names that were used in older versions of
// Felix and don't fit our versioned pattern.
LegacyV4IPSetNames = []string{"felix-masq-ipam-pools", "felix-all-ipam-pools"}
)
type RuleRenderer interface {
StaticFilterTableChains(ipVersion uint8) []*iptables.Chain
StaticFilterForwardChains() []*iptables.Chain
StaticNATTableChains(ipVersion uint8) []*iptables.Chain
WorkloadDispatchChains(map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain
WorkloadEndpointToIptablesChains(epID *proto.WorkloadEndpointID, endpoint *proto.WorkloadEndpoint) []*iptables.Chain
HostDispatchChains(map[string]proto.HostEndpointID) []*iptables.Chain
HostEndpointToIptablesChains(ifaceName string, endpoint *proto.HostEndpoint) []*iptables.Chain
PolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain
ProfileToIptablesChains(policyID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) []*iptables.Chain
ProtoRuleToIptablesRules(pRule *proto.Rule, ipVersion uint8) []iptables.Rule
NATOutgoingChain(active bool, ipVersion uint8) *iptables.Chain
}
type ruleRenderer struct {
Config
dropActions []iptables.Action
inputAcceptActions []iptables.Action
}
func (r *ruleRenderer) ipSetConfig(ipVersion uint8) *ipsets.IPVersionConfig {
if ipVersion == 4 {
return r.IPSetConfigV4
} else if ipVersion == 6 {
return r.IPSetConfigV6
} else {
log.WithField("version", ipVersion).Panic("Unknown IP version")
return nil
}
}
type Config struct {
IPSetConfigV4 *ipsets.IPVersionConfig
IPSetConfigV6 *ipsets.IPVersionConfig
WorkloadIfacePrefixes []string
IptablesMarkAccept uint32
IptablesMarkNextTier uint32
OpenStackMetadataIP net.IP
OpenStackMetadataPort uint16
OpenStackSpecialCasesEnabled bool
IPIPEnabled bool
IPIPTunnelAddress net.IP
ActionOnDrop string
EndpointToHostAction string
FailsafeInboundHostPorts []uint16
FailsafeOutboundHostPorts []uint16
}
func NewRenderer(config Config) RuleRenderer {
log.WithField("config", config).Info("Creating rule renderer.")
// Convert configured actions to rule slices. First, what should we actually do when we'd
// normally drop a packet? For sandbox mode, we support allowing the packet instead, or
// logging it.
var dropActions []iptables.Action
if strings.HasPrefix(config.ActionOnDrop, "LOG-") {
log.Warn("Action on drop includes LOG. All dropped packets will be logged.")
dropActions = append(dropActions, iptables.LogAction{Prefix: "calico-drop"})
}
if strings.HasSuffix(config.ActionOnDrop, "ACCEPT") {
log.Warn("Action on drop set to ACCEPT. Calico security is disabled!")
dropActions = append(dropActions, iptables.AcceptAction{})
} else {
dropActions = append(dropActions, iptables.DropAction{})
}
// Second, what should we do with packets that come from workloads to the host itself.
var inputAcceptActions []iptables.Action
switch config.EndpointToHostAction {
case "DROP":
log.Info("Workload to host packets will be dropped.")
inputAcceptActions = dropActions
case "ACCEPT":
log.Info("Workload to host packets will be accepted.")
inputAcceptActions = []iptables.Action{iptables.AcceptAction{}}
default:
log.Info("Workload to host packets will be returned to INPUT chain.")
inputAcceptActions = []iptables.Action{iptables.ReturnAction{}}
}
return &ruleRenderer{
Config: config,
dropActions: dropActions,
inputAcceptActions: inputAcceptActions,
}
}
| 1 | 14,857 | Upper case is reserved for our versions of top-level chains i.e. the `FORWARD` chain jumps to `cali-FORWARD`. | projectcalico-felix | go |
@@ -3373,10 +3373,14 @@ void nano::json_handler::receive ()
}
if (!ec)
{
+ // Representative is only used by receive_action when opening accounts
+ // Set a wallet default representative for new accounts
+ nano::account representative (wallet->store.representative (node.wallets.tx_begin_read ()));
+
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (response);
// clang-format off
- wallet->receive_async(std::move(block), account, node.network_params.ledger.genesis_amount, [response_a](std::shared_ptr<nano::block> block_a) {
+ wallet->receive_async(std::move(block), representative, node.network_params.ledger.genesis_amount, [response_a](std::shared_ptr<nano::block> block_a) {
if (block_a != nullptr)
{
boost::property_tree::ptree response_l; | 1 | #include <nano/lib/config.hpp>
#include <nano/lib/json_error_response.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/common.hpp>
#include <nano/node/ipc.hpp>
#include <nano/node/json_handler.hpp>
#include <nano/node/json_payment_observer.hpp>
#include <nano/node/node.hpp>
#include <nano/node/node_rpc_config.hpp>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/thread/thread_time.hpp>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <fstream>
#include <future>
#include <iostream>
#include <thread>
namespace
{
void construct_json (nano::seq_con_info_component * component, boost::property_tree::ptree & parent);
using ipc_json_handler_no_arg_func_map = std::unordered_map<std::string, std::function<void(nano::json_handler *)>>;
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ();
auto ipc_json_handler_no_arg_funcs = create_ipc_json_handler_no_arg_func_map ();
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed);
const char * epoch_as_string (nano::epoch);
}
nano::json_handler::json_handler (nano::node & node_a, nano::node_rpc_config const & node_rpc_config_a, std::string const & body_a, std::function<void(std::string const &)> const & response_a, std::function<void()> stop_callback_a) :
body (body_a),
node (node_a),
response (response_a),
stop_callback (stop_callback_a),
node_rpc_config (node_rpc_config_a)
{
}
void nano::json_handler::process_request (bool unsafe_a)
{
try
{
std::stringstream istream (body);
boost::property_tree::read_json (istream, request);
action = request.get<std::string> ("action");
auto no_arg_func_iter = ipc_json_handler_no_arg_funcs.find (action);
if (no_arg_func_iter != ipc_json_handler_no_arg_funcs.cend ())
{
// First try the map of options with no arguments
no_arg_func_iter->second (this);
}
else
{
// Try the rest of the options
if (action == "wallet_seed")
{
if (unsafe_a || node.network_params.network.is_test_network ())
{
wallet_seed ();
}
else
{
json_error_response (response, "Unsafe RPC not allowed");
}
}
else if (action == "chain")
{
chain ();
}
else if (action == "successors")
{
chain (true);
}
else if (action == "history")
{
request.put ("head", request.get<std::string> ("hash"));
account_history ();
}
else if (action == "knano_from_raw" || action == "krai_from_raw")
{
mnano_from_raw (nano::kxrb_ratio);
}
else if (action == "knano_to_raw" || action == "krai_to_raw")
{
mnano_to_raw (nano::kxrb_ratio);
}
else if (action == "nano_from_raw" || action == "rai_from_raw")
{
mnano_from_raw (nano::xrb_ratio);
}
else if (action == "nano_to_raw" || action == "rai_to_raw")
{
mnano_to_raw (nano::xrb_ratio);
}
else if (action == "mnano_from_raw" || action == "mrai_from_raw")
{
mnano_from_raw ();
}
else if (action == "mnano_to_raw" || action == "mrai_to_raw")
{
mnano_to_raw ();
}
else if (action == "password_valid")
{
password_valid ();
}
else if (action == "wallet_locked")
{
password_valid (true);
}
else
{
json_error_response (response, "Unknown command");
}
}
}
catch (std::runtime_error const &)
{
json_error_response (response, "Unable to parse JSON");
}
catch (...)
{
json_error_response (response, "Internal server error in RPC");
}
}
void nano::json_handler::response_errors ()
{
if (ec || response_l.empty ())
{
boost::property_tree::ptree response_error;
response_error.put ("error", ec ? ec.message () : "Empty response");
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_error);
response (ostream.str ());
}
else
{
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
}
std::shared_ptr<nano::wallet> nano::json_handler::wallet_impl ()
{
if (!ec)
{
std::string wallet_text (request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
auto existing (node.wallets.items.find (wallet));
if (existing != node.wallets.items.end ())
{
return existing->second;
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
else
{
ec = nano::error_common::bad_wallet_number;
}
}
return nullptr;
}
bool nano::json_handler::wallet_locked_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> wallet_a)
{
bool result (false);
if (!ec)
{
if (!wallet_a->store.valid_password (transaction_a))
{
ec = nano::error_common::wallet_locked;
result = true;
}
}
return result;
}
bool nano::json_handler::wallet_account_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> wallet_a, nano::account const & account_a)
{
bool result (false);
if (!ec)
{
if (wallet_a->store.find (transaction_a, account_a) != wallet_a->store.end ())
{
result = true;
}
else
{
ec = nano::error_common::account_not_found_wallet;
}
}
return result;
}
nano::account nano::json_handler::account_impl (std::string account_text, std::error_code ec_a)
{
nano::account result (0);
if (!ec)
{
if (account_text.empty ())
{
account_text = request.get<std::string> ("account");
}
if (result.decode_account (account_text))
{
ec = ec_a;
}
else if (account_text[3] == '-' || account_text[4] == '-')
{
// nano- and xrb- prefixes are deprecated
response_l.put ("deprecated_account_format", "1");
}
}
return result;
}
nano::account_info nano::json_handler::account_info_impl (nano::transaction const & transaction_a, nano::account const & account_a)
{
nano::account_info result;
if (!ec)
{
if (node.store.account_get (transaction_a, account_a, result))
{
ec = nano::error_common::account_not_found;
node.bootstrap_initiator.bootstrap_lazy (account_a, false, false);
}
}
return result;
}
nano::amount nano::json_handler::amount_impl ()
{
nano::amount result (0);
if (!ec)
{
std::string amount_text (request.get<std::string> ("amount"));
if (result.decode_dec (amount_text))
{
ec = nano::error_common::invalid_amount;
}
}
return result;
}
std::shared_ptr<nano::block> nano::json_handler::block_impl (bool signature_work_required)
{
std::shared_ptr<nano::block> result{ nullptr };
if (!ec)
{
std::string block_text (request.get<std::string> ("block"));
boost::property_tree::ptree block_l;
std::stringstream block_stream (block_text);
try
{
boost::property_tree::read_json (block_stream, block_l);
}
catch (...)
{
ec = nano::error_blocks::invalid_block;
}
if (!ec)
{
if (!signature_work_required)
{
block_l.put ("signature", "0");
block_l.put ("work", "0");
}
result = nano::deserialize_block_json (block_l);
if (result == nullptr)
{
ec = nano::error_blocks::invalid_block;
}
}
}
return result;
}
std::shared_ptr<nano::block> nano::json_handler::block_json_impl (bool signature_work_required)
{
std::shared_ptr<nano::block> result;
if (!ec)
{
auto block_l (request.get_child ("block"));
if (!signature_work_required)
{
block_l.put ("signature", "0");
block_l.put ("work", "0");
}
result = nano::deserialize_block_json (block_l);
if (result == nullptr)
{
ec = nano::error_blocks::invalid_block;
}
}
return result;
}
nano::block_hash nano::json_handler::hash_impl (std::string search_text)
{
nano::block_hash result (0);
if (!ec)
{
std::string hash_text (request.get<std::string> (search_text));
if (result.decode_hex (hash_text))
{
ec = nano::error_blocks::invalid_block_hash;
}
}
return result;
}
nano::amount nano::json_handler::threshold_optional_impl ()
{
nano::amount result (0);
boost::optional<std::string> threshold_text (request.get_optional<std::string> ("threshold"));
if (!ec && threshold_text.is_initialized ())
{
if (result.decode_dec (threshold_text.get ()))
{
ec = nano::error_common::bad_threshold;
}
}
return result;
}
uint64_t nano::json_handler::work_optional_impl ()
{
uint64_t result (0);
boost::optional<std::string> work_text (request.get_optional<std::string> ("work"));
if (!ec && work_text.is_initialized ())
{
if (nano::from_string_hex (work_text.get (), result))
{
ec = nano::error_common::bad_work_format;
}
}
return result;
}
uint64_t nano::json_handler::difficulty_optional_impl ()
{
uint64_t difficulty (node.network_params.network.publish_threshold);
boost::optional<std::string> difficulty_text (request.get_optional<std::string> ("difficulty"));
if (!ec && difficulty_text.is_initialized ())
{
if (nano::from_string_hex (difficulty_text.get (), difficulty))
{
ec = nano::error_rpc::bad_difficulty_format;
}
}
return difficulty;
}
double nano::json_handler::multiplier_optional_impl (uint64_t & difficulty)
{
double multiplier (1.);
boost::optional<std::string> multiplier_text (request.get_optional<std::string> ("multiplier"));
if (!ec && multiplier_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<double> (multiplier_text.get (), multiplier);
if (success && multiplier > 0.)
{
difficulty = nano::difficulty::from_multiplier (multiplier, node.network_params.network.publish_threshold);
}
else
{
ec = nano::error_rpc::bad_multiplier_format;
}
}
return multiplier;
}
namespace
{
bool decode_unsigned (std::string const & text, uint64_t & number)
{
bool result;
size_t end;
try
{
number = std::stoull (text, &end);
result = false;
}
catch (std::invalid_argument const &)
{
result = true;
}
catch (std::out_of_range const &)
{
result = true;
}
result = result || end != text.size ();
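	// Note: the end != text.size () check above rejects trailing non-numeric characters
	// (for example "12abc"), so only fully numeric strings are accepted.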
return result;
}
}
uint64_t nano::json_handler::count_impl ()
{
uint64_t result (0);
if (!ec)
{
std::string count_text (request.get<std::string> ("count"));
if (decode_unsigned (count_text, result) || result == 0)
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::count_optional_impl (uint64_t result)
{
boost::optional<std::string> count_text (request.get_optional<std::string> ("count"));
if (!ec && count_text.is_initialized ())
{
if (decode_unsigned (count_text.get (), result))
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::offset_optional_impl (uint64_t result)
{
boost::optional<std::string> offset_text (request.get_optional<std::string> ("offset"));
if (!ec && offset_text.is_initialized ())
{
if (decode_unsigned (offset_text.get (), result))
{
ec = nano::error_rpc::invalid_offset;
}
}
return result;
}
void nano::json_handler::account_balance ()
{
auto account (account_impl ());
if (!ec)
{
auto balance (node.balance_pending (account));
response_l.put ("balance", balance.first.convert_to<std::string> ());
response_l.put ("pending", balance.second.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::account_block_count ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("block_count", std::to_string (info.block_count));
}
}
response_errors ();
}
void nano::json_handler::account_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
nano::account new_key;
auto index_text (rpc_l->request.get_optional<std::string> ("index"));
if (index_text.is_initialized ())
{
uint64_t index;
if (decode_unsigned (index_text.get (), index) || index > static_cast<uint64_t> (std::numeric_limits<uint32_t>::max ()))
{
rpc_l->ec = nano::error_common::invalid_index;
}
else
{
new_key = wallet->deterministic_insert (static_cast<uint32_t> (index), generate_work);
}
}
else
{
new_key = wallet->deterministic_insert (generate_work);
}
if (!rpc_l->ec)
{
if (!new_key.is_zero ())
{
rpc_l->response_l.put ("account", new_key.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_get ()
{
std::string key_text (request.get<std::string> ("key"));
nano::public_key pub;
if (!pub.decode_hex (key_text))
{
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_public_key;
}
response_errors ();
}
void nano::json_handler::account_info ()
{
auto account (account_impl ());
if (!ec)
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
uint64_t confirmation_height;
if (node.store.confirmation_height_get (transaction, account, confirmation_height))
{
ec = nano::error_common::account_not_found;
}
if (!ec)
{
response_l.put ("frontier", info.head.to_string ());
response_l.put ("open_block", info.open_block.to_string ());
response_l.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
response_l.put ("balance", balance);
response_l.put ("modified_timestamp", std::to_string (info.modified));
response_l.put ("block_count", std::to_string (info.block_count));
response_l.put ("account_version", epoch_as_string (info.epoch ()));
response_l.put ("confirmation_height", std::to_string (confirmation_height));
if (representative)
{
response_l.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_l.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
response_l.put ("pending", account_pending.convert_to<std::string> ());
}
}
}
response_errors ();
}
void nano::json_handler::account_key ()
{
auto account (account_impl ());
if (!ec)
{
response_l.put ("key", account.to_string ());
}
response_errors ();
}
void nano::json_handler::account_list ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), j (wallet->store.end ()); i != j; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", nano::account (i->first).to_account ());
accounts.push_back (std::make_pair ("", entry));
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::account_move ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string source_text (rpc_l->request.get<std::string> ("source"));
auto accounts_text (rpc_l->request.get_child ("accounts"));
nano::wallet_id source;
if (!source.decode_hex (source_text))
{
auto existing (rpc_l->node.wallets.items.find (source));
if (existing != rpc_l->node.wallets.items.end ())
{
auto source (existing->second);
std::vector<nano::public_key> accounts;
for (auto i (accounts_text.begin ()), n (accounts_text.end ()); i != n; ++i)
{
auto account (rpc_l->account_impl (i->second.get<std::string> ("")));
accounts.push_back (account);
}
auto transaction (rpc_l->node.wallets.tx_begin_write ());
auto error (wallet->store.move (transaction, source->store, accounts));
rpc_l->response_l.put ("moved", error ? "0" : "1");
}
else
{
rpc_l->ec = nano::error_rpc::source_not_found;
}
}
else
{
rpc_l->ec = nano::error_rpc::bad_source;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_remove ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.erase (transaction, account);
rpc_l->response_l.put ("removed", "1");
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_representative ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("representative", info.representative.to_account ());
}
}
response_errors ();
}
void nano::json_handler::account_representative_set ()
{
auto rpc_l (shared_from_this ());
// clang-format off
node.worker.push_task ([ rpc_l, work_generation_enabled = node.work_generation_enabled () ]() {
// clang-format on
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec && work)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
auto block_transaction (rpc_l->node.store.tx_begin_read ());
auto info (rpc_l->account_info_impl (block_transaction, account));
if (!rpc_l->ec)
{
if (nano::work_validate (info.head, work))
{
rpc_l->ec = nano::error_common::invalid_work;
}
}
}
}
else if (!rpc_l->ec) // work == 0
{
if (!work_generation_enabled)
{
rpc_l->ec = nano::error_common::disabled_work_generation;
}
}
if (!rpc_l->ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (rpc_l->response);
auto response_data (std::make_shared<boost::property_tree::ptree> (rpc_l->response_l));
// clang-format off
wallet->change_async(account, representative, [response_a, response_data](std::shared_ptr<nano::block> block) {
if (block != nullptr)
{
response_data->put("block", block->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, *response_data);
response_a(ostream.str());
}
else
{
json_error_response(response_a, "Error generating block");
}
},
work, generate_work);
// clang-format on
}
}
	// change_async responds through its callback, so only report errors here if the callback will not run
if (rpc_l->ec)
{
rpc_l->response_errors ();
}
});
}
void nano::json_handler::account_weight ()
{
auto account (account_impl ());
if (!ec)
{
auto balance (node.weight (account));
response_l.put ("weight", balance.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::accounts_balances ()
{
boost::property_tree::ptree balances;
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree entry;
auto balance (node.balance_pending (account));
entry.put ("balance", balance.first.convert_to<std::string> ());
entry.put ("pending", balance.second.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
response_errors ();
}
void nano::json_handler::accounts_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto count (rpc_l->count_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", false);
boost::property_tree::ptree accounts;
for (auto i (0); accounts.size () < count; ++i)
{
nano::account new_key (wallet->deterministic_insert (generate_work));
if (!new_key.is_zero ())
{
boost::property_tree::ptree entry;
entry.put ("", new_key.to_account ());
accounts.push_back (std::make_pair ("", entry));
}
}
rpc_l->response_l.add_child ("accounts", accounts);
}
rpc_l->response_errors ();
});
}
void nano::json_handler::accounts_frontiers ()
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
auto latest (node.ledger.latest (transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
}
response_l.add_child ("frontiers", frontiers);
response_errors ();
}
void nano::json_handler::accounts_pending ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !sorting); // if simple, response is a list of hashes for each account
boost::property_tree::ptree pending;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree peers_l;
for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
pending_tree.put ("source", info.source.to_account ());
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (sorting && !simple)
{
if (source)
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("amount") > child2.second.template get<nano::uint128_t> ("amount");
});
}
else
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("") > child2.second.template get<nano::uint128_t> ("");
});
}
}
pending.add_child (account.to_account (), peers_l);
}
}
response_l.add_child ("blocks", pending);
response_errors ();
}
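// A rough sketch of the response shapes produced above (field names come from the puts
// in this handler; account, hash and amount values are hypothetical placeholders):
//   simple:         "blocks": { "<account>": [ "<hash>", ... ] }
//   with threshold: "blocks": { "<account>": { "<hash>": "<amount>", ... } }
//   with source:    "blocks": { "<account>": { "<hash>": { "amount": "<amount>", "source": "<account>" } } }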
void nano::json_handler::active_difficulty ()
{
auto include_trend (request.get<bool> ("include_trend", false));
response_l.put ("network_minimum", nano::to_string_hex (node.network_params.network.publish_threshold));
auto difficulty_active = node.active.active_difficulty ();
response_l.put ("network_current", nano::to_string_hex (difficulty_active));
auto multiplier = nano::difficulty::to_multiplier (difficulty_active, node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (multiplier));
if (include_trend)
{
boost::property_tree::ptree trend_entry_l;
auto trend_l (node.active.difficulty_trend ());
for (auto multiplier_l : trend_l)
{
boost::property_tree::ptree entry;
entry.put ("", nano::to_string (multiplier_l));
trend_entry_l.push_back (std::make_pair ("", entry));
}
response_l.add_child ("difficulty_trend", trend_entry_l);
}
response_errors ();
}
void nano::json_handler::available_supply ()
{
auto genesis_balance (node.balance (node.network_params.ledger.genesis_account)); // Cold storage genesis
auto landing_balance (node.balance (nano::account ("059F68AAB29DE0D3A27443625C7EA9CDDB6517A8B76FE37727EF6A4D76832AD5"))); // Active unavailable account
auto faucet_balance (node.balance (nano::account ("8E319CE6F3025E5B2DF66DA7AB1467FE48F1679C13DD43BFDB29FA2E9FC40D3B"))); // Faucet account
auto burned_balance ((node.balance_pending (nano::account (0))).second); // Burning 0 account
auto available (node.network_params.ledger.genesis_amount - genesis_balance - landing_balance - faucet_balance - burned_balance);
response_l.put ("available", available.convert_to<std::string> ());
response_errors ();
}
void state_subtype (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::uint128_t const & balance_a, boost::property_tree::ptree & tree_a)
{
// Subtype check
auto previous_balance (node_a.ledger.balance (transaction_a, block_a->previous ()));
if (balance_a < previous_balance)
{
tree_a.put ("subtype", "send");
}
else
{
if (block_a->link ().is_zero ())
{
tree_a.put ("subtype", "change");
}
else if (balance_a == previous_balance && node_a.ledger.is_epoch_link (block_a->link ()))
{
tree_a.put ("subtype", "epoch");
}
else
{
tree_a.put ("subtype", "receive");
}
}
}
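// Summary of the subtype rules implemented above (a reading aid, not part of the handler):
//   balance < previous balance                       -> "send"
//   link == 0                                        -> "change"
//   balance == previous balance && link is epoch     -> "epoch"
//   otherwise                                        -> "receive"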
void nano::json_handler::block_info ()
{
auto hash (hash_impl ());
if (!ec)
{
nano::block_sideband sideband;
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash, &sideband));
if (block != nullptr)
{
nano::account account (block->account ().is_zero () ? sideband.account : block->account ());
response_l.put ("block_account", account.to_account ());
auto amount (node.ledger.amount (transaction, hash));
response_l.put ("amount", amount.convert_to<std::string> ());
auto balance (node.ledger.balance (transaction, hash));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("height", std::to_string (sideband.height));
response_l.put ("local_timestamp", std::to_string (sideband.timestamp));
auto confirmed (node.ledger.block_confirmed (transaction, hash));
response_l.put ("confirmed", confirmed);
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
state_subtype (transaction, node, block, balance, response_l);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_confirm ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block_l (node.store.block_get (transaction, hash));
if (block_l != nullptr)
{
if (!node.ledger.block_confirmed (transaction, hash))
{
// Start new confirmation for unconfirmed block
node.block_confirm (std::move (block_l));
}
else
{
// Add record in confirmation history for confirmed block
nano::election_status status{ block_l, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, nano::election_status_type::active_confirmation_height };
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
node.active.confirmed.push_back (status);
if (node.active.confirmed.size () > node.config.confirmation_history_size)
{
node.active.confirmed.pop_front ();
}
}
// Trigger callback for confirmed block
node.block_arrival.add (hash);
auto account (node.ledger.account (transaction, hash));
auto amount (node.ledger.amount (transaction, hash));
bool is_state_send (false);
if (auto state = dynamic_cast<nano::state_block *> (block_l.get ()))
{
is_state_send = node.ledger.is_send (transaction, *state);
}
node.observers.blocks.notify (status, account, amount, is_state_send);
}
response_l.put ("started", "1");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::blocks ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
blocks.add_child (hash_text, block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
blocks.put (hash_text, contents);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
response_l.add_child ("blocks", blocks);
response_errors ();
}
void nano::json_handler::blocks_info ()
{
const bool pending = request.get<bool> ("pending", false);
const bool source = request.get<bool> ("source", false);
const bool json_block_l = request.get<bool> ("json_block", false);
const bool include_not_found = request.get<bool> ("include_not_found", false);
boost::property_tree::ptree blocks;
boost::property_tree::ptree blocks_not_found;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
nano::block_sideband sideband;
auto block (node.store.block_get (transaction, hash, &sideband));
if (block != nullptr)
{
boost::property_tree::ptree entry;
nano::account account (block->account ().is_zero () ? sideband.account : block->account ());
entry.put ("block_account", account.to_account ());
auto amount (node.ledger.amount (transaction, hash));
entry.put ("amount", amount.convert_to<std::string> ());
auto balance (node.ledger.balance (transaction, hash));
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("height", std::to_string (sideband.height));
entry.put ("local_timestamp", std::to_string (sideband.timestamp));
auto confirmed (node.ledger.block_confirmed (transaction, hash));
entry.put ("confirmed", confirmed);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
entry.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
state_subtype (transaction, node, block, balance, entry);
}
if (pending)
{
bool exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
}
entry.put ("pending", exists ? "1" : "0");
}
if (source)
{
nano::block_hash source_hash (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block_get (transaction, source_hash));
if (block_a != nullptr)
{
auto source_account (node.ledger.account (transaction, source_hash));
entry.put ("source_account", source_account.to_account ());
}
else
{
entry.put ("source_account", "0");
}
}
blocks.push_back (std::make_pair (hash_text, entry));
}
else if (include_not_found)
{
boost::property_tree::ptree entry;
entry.put ("", hash_text);
blocks_not_found.push_back (std::make_pair ("", entry));
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
if (!ec)
{
response_l.add_child ("blocks", blocks);
if (include_not_found)
{
response_l.add_child ("blocks_not_found", blocks_not_found);
}
}
response_errors ();
}
void nano::json_handler::block_account ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block_exists (transaction, hash))
{
auto account (node.ledger.account (transaction, hash));
response_l.put ("account", account.to_account ());
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_count ()
{
auto transaction (node.store.tx_begin_read ());
response_l.put ("count", std::to_string (node.store.block_count (transaction).sum ()));
response_l.put ("unchecked", std::to_string (node.store.unchecked_count (transaction)));
response_l.put ("cemented", std::to_string (node.ledger.cemented_count));
response_errors ();
}
void nano::json_handler::block_count_type ()
{
auto transaction (node.store.tx_begin_read ());
nano::block_counts count (node.store.block_count (transaction));
response_l.put ("send", std::to_string (count.send));
response_l.put ("receive", std::to_string (count.receive));
response_l.put ("open", std::to_string (count.open));
response_l.put ("change", std::to_string (count.change));
response_l.put ("state", std::to_string (count.state));
response_errors ();
}
void nano::json_handler::block_create ()
{
std::string type (request.get<std::string> ("type"));
nano::wallet_id wallet (0);
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (wallet_text.is_initialized ())
{
if (wallet.decode_hex (wallet_text.get ()))
{
ec = nano::error_common::bad_wallet_number;
}
}
nano::account account (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (!ec && account_text.is_initialized ())
{
account = account_impl (account_text.get ());
}
nano::account representative (0);
boost::optional<std::string> representative_text (request.get_optional<std::string> ("representative"));
if (!ec && representative_text.is_initialized ())
{
representative = account_impl (representative_text.get (), nano::error_rpc::bad_representative_number);
}
nano::account destination (0);
boost::optional<std::string> destination_text (request.get_optional<std::string> ("destination"));
if (!ec && destination_text.is_initialized ())
{
destination = account_impl (destination_text.get (), nano::error_rpc::bad_destination);
}
nano::block_hash source (0);
boost::optional<std::string> source_text (request.get_optional<std::string> ("source"));
if (!ec && source_text.is_initialized ())
{
if (source.decode_hex (source_text.get ()))
{
ec = nano::error_rpc::bad_source;
}
}
nano::amount amount (0);
boost::optional<std::string> amount_text (request.get_optional<std::string> ("amount"));
if (!ec && amount_text.is_initialized ())
{
if (amount.decode_dec (amount_text.get ()))
{
ec = nano::error_common::invalid_amount;
}
}
auto work (work_optional_impl ());
nano::raw_key prv;
prv.data.clear ();
nano::block_hash previous (0);
nano::amount balance (0);
if (work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec && wallet != 0 && account != 0)
{
auto existing (node.wallets.items.find (wallet));
if (existing != node.wallets.items.end ())
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, existing->second);
wallet_account_impl (transaction, existing->second, account);
if (!ec)
{
existing->second->store.fetch (transaction, account, prv);
previous = node.ledger.latest (block_transaction, account);
balance = node.ledger.account_balance (block_transaction, account);
}
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (!ec && key_text.is_initialized ())
{
if (prv.data.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
boost::optional<std::string> previous_text (request.get_optional<std::string> ("previous"));
if (!ec && previous_text.is_initialized ())
{
if (previous.decode_hex (previous_text.get ()))
{
ec = nano::error_rpc::bad_previous;
}
}
boost::optional<std::string> balance_text (request.get_optional<std::string> ("balance"));
if (!ec && balance_text.is_initialized ())
{
if (balance.decode_dec (balance_text.get ()))
{
ec = nano::error_rpc::invalid_balance;
}
}
nano::link link (0);
boost::optional<std::string> link_text (request.get_optional<std::string> ("link"));
if (!ec && link_text.is_initialized ())
{
if (link.decode_account (link_text.get ()))
{
if (link.decode_hex (link_text.get ()))
{
ec = nano::error_rpc::bad_link;
}
}
}
else
{
// Retrieve link from source or destination
if (source.is_zero ())
{
link = destination;
}
else
{
link = source;
}
}
if (!ec)
{
auto rpc_l (shared_from_this ());
// Serializes the block contents to the RPC response
auto block_response_put_l = [rpc_l, this](nano::block const & block_a) {
boost::property_tree::ptree response_l;
response_l.put ("hash", block_a.hash ().to_string ());
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block_a.serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
block_a.serialize_json (contents);
response_l.put ("block", contents);
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
rpc_l->response (ostream.str ());
};
// Wrapper from argument to lambda capture, to extend the block's scope
auto get_callback_l = [rpc_l, this, block_response_put_l](std::shared_ptr<nano::block> block_a) {
// Callback upon work generation success or failure
return [block_a, rpc_l, this, block_response_put_l](boost::optional<uint64_t> const & work_a) {
if (block_a != nullptr)
{
if (work_a.is_initialized ())
{
block_a->block_work_set (*work_a);
block_response_put_l (*block_a);
}
else
{
rpc_l->ec = nano::error_common::failure_work_generation;
}
}
else
{
rpc_l->ec = nano::error_common::generic;
}
if (rpc_l->ec)
{
rpc_l->response_errors ();
}
};
};
if (prv.data != 0)
{
nano::account pub (nano::pub_key (prv.as_private_key ()));
// Fetching account balance & previous for send blocks (if they aren't given directly)
if (!previous_text.is_initialized () && !balance_text.is_initialized ())
{
auto transaction (node.store.tx_begin_read ());
previous = node.ledger.latest (transaction, pub);
balance = node.ledger.account_balance (transaction, pub);
}
// Double check current balance if previous block is specified
else if (previous_text.is_initialized () && balance_text.is_initialized () && type == "send")
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block_exists (transaction, previous) && node.store.block_balance (transaction, previous) != balance.number ())
{
ec = nano::error_rpc::block_create_balance_mismatch;
}
}
// Check for incorrect account key
if (!ec && account_text.is_initialized ())
{
if (account != pub)
{
ec = nano::error_rpc::block_create_public_key_mismatch;
}
}
nano::block_builder builder_l;
std::shared_ptr<nano::block> block_l{ nullptr };
nano::root root_l;
std::error_code ec_build;
if (type == "state")
{
if (previous_text.is_initialized () && !representative.is_zero () && (!link.is_zero () || link_text.is_initialized ()))
{
block_l = builder_l.state ()
.account (pub)
.previous (previous)
.representative (representative)
.balance (balance)
.link (link)
.sign (prv, pub)
.build (ec_build);
if (previous.is_zero ())
{
root_l = pub;
}
else
{
root_l = previous;
}
}
else
{
ec = nano::error_rpc::block_create_requirements_state;
}
}
else if (type == "open")
{
if (representative != 0 && source != 0)
{
block_l = builder_l.open ()
.account (pub)
.source (source)
.representative (representative)
.sign (prv, pub)
.build (ec_build);
root_l = pub;
}
else
{
ec = nano::error_rpc::block_create_requirements_open;
}
}
else if (type == "receive")
{
if (source != 0 && previous != 0)
{
block_l = builder_l.receive ()
.previous (previous)
.source (source)
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_rpc::block_create_requirements_receive;
}
}
else if (type == "change")
{
if (representative != 0 && previous != 0)
{
block_l = builder_l.change ()
.previous (previous)
.representative (representative)
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_rpc::block_create_requirements_change;
}
}
else if (type == "send")
{
if (destination != 0 && previous != 0 && balance != 0 && amount != 0)
{
if (balance.number () >= amount.number ())
{
block_l = builder_l.send ()
.previous (previous)
.destination (destination)
.balance (balance.number () - amount.number ())
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_common::insufficient_balance;
}
}
else
{
ec = nano::error_rpc::block_create_requirements_send;
}
}
else
{
ec = nano::error_blocks::invalid_type;
}
if (!ec && (!ec_build || ec_build == nano::error_common::missing_work))
{
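// A build error of missing_work is tolerated here: the work value is either generated
// asynchronously below (work == 0) or taken from the request via block_work_set.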
if (work == 0)
{
node.work_generate (root_l, get_callback_l (block_l), nano::account (pub));
}
else
{
block_l->block_work_set (work);
block_response_put_l (*block_l);
}
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
// The response is normally sent from the work-generation callback above, so only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::block_hash ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
std::shared_ptr<nano::block> block;
if (json_block_l)
{
block = block_json_impl (true);
}
else
{
block = block_impl (true);
}
if (!ec)
{
response_l.put ("hash", block->hash ().to_string ());
}
response_errors ();
}
void nano::json_handler::bootstrap ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
const bool bypass_frontier_confirmation = request.get<bool> ("bypass_frontier_confirmation", false);
boost::system::error_code address_ec;
auto address (boost::asio::ip::address_v6::from_string (address_text, address_ec));
if (!address_ec)
{
uint16_t port;
if (!nano::parse_port (port_text, port))
{
if (!node.flags.disable_legacy_bootstrap)
{
node.bootstrap_initiator.bootstrap (nano::endpoint (address, port), true, bypass_frontier_confirmation);
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
}
else
{
ec = nano::error_common::invalid_port;
}
}
else
{
ec = nano::error_common::invalid_ip_address;
}
response_errors ();
}
void nano::json_handler::bootstrap_any ()
{
const bool force = request.get<bool> ("force", false);
if (!node.flags.disable_legacy_bootstrap)
{
node.bootstrap_initiator.bootstrap (force);
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
response_errors ();
}
void nano::json_handler::bootstrap_lazy ()
{
auto hash (hash_impl ());
const bool force = request.get<bool> ("force", false);
if (!ec)
{
if (!node.flags.disable_lazy_bootstrap)
{
node.bootstrap_initiator.bootstrap_lazy (hash, force);
response_l.put ("started", "1");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_lazy;
}
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::bootstrap_status ()
{
auto attempt (node.bootstrap_initiator.current_attempt ());
if (attempt != nullptr)
{
nano::lock_guard<std::mutex> lock (attempt->mutex);
nano::lock_guard<std::mutex> lazy_lock (attempt->lazy_mutex);
response_l.put ("clients", std::to_string (attempt->clients.size ()));
response_l.put ("pulls", std::to_string (attempt->pulls.size ()));
response_l.put ("pulling", std::to_string (attempt->pulling));
response_l.put ("connections", std::to_string (attempt->connections));
response_l.put ("idle", std::to_string (attempt->idle.size ()));
response_l.put ("target_connections", std::to_string (attempt->target_connections (attempt->pulls.size ())));
response_l.put ("total_blocks", std::to_string (attempt->total_blocks));
response_l.put ("runs_count", std::to_string (attempt->runs_count));
response_l.put ("requeued_pulls", std::to_string (attempt->requeued_pulls));
response_l.put ("frontiers_received", static_cast<bool> (attempt->frontiers_received));
response_l.put ("frontiers_confirmed", static_cast<bool> (attempt->frontiers_confirmed));
std::string mode_text;
if (attempt->mode == nano::bootstrap_mode::legacy)
{
mode_text = "legacy";
}
else if (attempt->mode == nano::bootstrap_mode::lazy)
{
mode_text = "lazy";
}
else if (attempt->mode == nano::bootstrap_mode::wallet_lazy)
{
mode_text = "wallet_lazy";
}
response_l.put ("mode", mode_text);
response_l.put ("lazy_blocks", std::to_string (attempt->lazy_blocks.size ()));
response_l.put ("lazy_state_backlog", std::to_string (attempt->lazy_state_backlog.size ()));
response_l.put ("lazy_balances", std::to_string (attempt->lazy_balances.size ()));
response_l.put ("lazy_destinations", std::to_string (attempt->lazy_destinations.size ()));
response_l.put ("lazy_undefined_links", std::to_string (attempt->lazy_undefined_links.size ()));
response_l.put ("lazy_pulls", std::to_string (attempt->lazy_pulls.size ()));
response_l.put ("lazy_keys", std::to_string (attempt->lazy_keys.size ()));
if (!attempt->lazy_keys.empty ())
{
response_l.put ("lazy_key_1", (*(attempt->lazy_keys.begin ())).to_string ());
}
response_l.put ("duration", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt->attempt_start).count ());
}
else
{
response_l.put ("active", "0");
}
response_errors ();
}
void nano::json_handler::chain (bool successors)
{
successors = successors != request.get<bool> ("reverse", false);
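// XOR with the optional "reverse" flag: "chain" walks backwards via previous () unless
// reversed, while "successors" walks forward via block_successor () unless reversed.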
auto hash (hash_impl ("block"));
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
while (!hash.is_zero () && blocks.size () < count)
{
auto block_l (node.store.block_get (transaction, hash));
if (block_l != nullptr)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
hash = successors ? node.store.block_successor (transaction, hash) : block_l->previous ();
}
else
{
hash.clear ();
}
}
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
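// Hedged usage sketch (hypothetical values): {"action": "chain", "block": "<hash>", "count": "8", "offset": "2"}
// starts at <hash>, skips the first 2 blocks and returns up to 8 hashes under "blocks",
// following previous () links; adding "reverse": "true" or using "successors" walks forward instead.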
void nano::json_handler::confirmation_active ()
{
uint64_t announcements (0);
boost::optional<std::string> announcements_text (request.get_optional<std::string> ("announcements"));
if (announcements_text.is_initialized ())
{
announcements = strtoul (announcements_text.get ().c_str (), NULL, 10);
}
boost::property_tree::ptree elections;
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
for (auto i (node.active.roots.begin ()), n (node.active.roots.end ()); i != n; ++i)
{
if (i->election->confirmation_request_count >= announcements && !i->election->confirmed && !i->election->stopped)
{
boost::property_tree::ptree entry;
entry.put ("", i->root.to_string ());
elections.push_back (std::make_pair ("", entry));
}
}
}
response_l.add_child ("confirmations", elections);
response_errors ();
}
void nano::json_handler::confirmation_height_currently_processing ()
{
auto hash = node.pending_confirmation_height.current ();
if (!hash.is_zero ())
{
response_l.put ("hash", node.pending_confirmation_height.current ().to_string ());
}
else
{
ec = nano::error_rpc::confirmation_height_not_processing;
}
response_errors ();
}
void nano::json_handler::confirmation_history ()
{
boost::property_tree::ptree elections;
boost::property_tree::ptree confirmation_stats;
std::chrono::milliseconds running_total (0);
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
if (!ec)
{
auto confirmed (node.active.list_confirmed ());
for (auto i (confirmed.begin ()), n (confirmed.end ()); i != n; ++i)
{
if (hash.is_zero () || i->winner->hash () == hash)
{
boost::property_tree::ptree election;
election.put ("hash", i->winner->hash ().to_string ());
election.put ("duration", i->election_duration.count ());
election.put ("time", i->election_end.count ());
election.put ("tally", i->tally.to_string_dec ());
election.put ("request_count", i->confirmation_request_count);
elections.push_back (std::make_pair ("", election));
}
running_total += i->election_duration;
}
}
confirmation_stats.put ("count", elections.size ());
if (elections.size () >= 1)
{
confirmation_stats.put ("average", (running_total.count ()) / elections.size ());
}
response_l.add_child ("confirmation_stats", confirmation_stats);
response_l.add_child ("confirmations", elections);
response_errors ();
}
void nano::json_handler::confirmation_info ()
{
const bool representatives = request.get<bool> ("representatives", false);
const bool contents = request.get<bool> ("contents", true);
const bool json_block_l = request.get<bool> ("json_block", false);
std::string root_text (request.get<std::string> ("root"));
nano::qualified_root root;
if (!root.decode_hex (root_text))
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
auto conflict_info (node.active.roots.find (root));
if (conflict_info != node.active.roots.end ())
{
response_l.put ("announcements", std::to_string (conflict_info->election->confirmation_request_count));
auto election (conflict_info->election);
nano::uint128_t total (0);
response_l.put ("last_winner", election->status.winner->hash ().to_string ());
auto tally_l (election->tally ());
boost::property_tree::ptree blocks;
for (auto i (tally_l.begin ()), n (tally_l.end ()); i != n; ++i)
{
boost::property_tree::ptree entry;
auto const & tally (i->first);
entry.put ("tally", tally.convert_to<std::string> ());
total += tally;
if (contents)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
i->second->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
i->second->serialize_json (contents);
entry.put ("contents", contents);
}
}
if (representatives)
{
std::multimap<nano::uint128_t, nano::account, std::greater<nano::uint128_t>> representatives;
for (auto ii (election->last_votes.begin ()), nn (election->last_votes.end ()); ii != nn; ++ii)
{
if (i->second->hash () == ii->second.hash)
{
nano::account const & representative (ii->first);
auto amount (node.ledger.rep_weights.representation_get (representative));
representatives.emplace (std::move (amount), representative);
}
}
boost::property_tree::ptree representatives_list;
for (auto ii (representatives.begin ()), nn (representatives.end ()); ii != nn; ++ii)
{
representatives_list.put (ii->second.to_account (), ii->first.convert_to<std::string> ());
}
entry.add_child ("representatives", representatives_list);
}
blocks.add_child ((i->second->hash ()).to_string (), entry);
}
response_l.put ("total_tally", total.convert_to<std::string> ());
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_rpc::confirmation_not_found;
}
}
else
{
ec = nano::error_rpc::invalid_root;
}
response_errors ();
}
void nano::json_handler::confirmation_quorum ()
{
response_l.put ("quorum_delta", node.delta ().convert_to<std::string> ());
response_l.put ("online_weight_quorum_percent", std::to_string (node.config.online_weight_quorum));
response_l.put ("online_weight_minimum", node.config.online_weight_minimum.to_string_dec ());
response_l.put ("online_stake_total", node.online_reps.online_stake ().convert_to<std::string> ());
response_l.put ("peers_stake_total", node.rep_crawler.total_weight ().convert_to<std::string> ());
response_l.put ("peers_stake_required", std::max (node.config.online_weight_minimum.number (), node.delta ()).convert_to<std::string> ());
if (request.get<bool> ("peer_details", false))
{
boost::property_tree::ptree peers;
for (auto & peer : node.rep_crawler.representatives ())
{
boost::property_tree::ptree peer_node;
peer_node.put ("account", peer.account.to_account ());
peer_node.put ("ip", peer.channel->to_string ());
peer_node.put ("weight", peer.weight.to_string_dec ());
peers.push_back (std::make_pair ("", peer_node));
}
response_l.add_child ("peers", peers);
}
response_errors ();
}
void nano::json_handler::database_txn_tracker ()
{
boost::property_tree::ptree json;
if (node.config.diagnostics_config.txn_tracking.enable)
{
unsigned min_read_time_milliseconds = 0;
boost::optional<std::string> min_read_time_text (request.get_optional<std::string> ("min_read_time"));
if (min_read_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_read_time_text, min_read_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
unsigned min_write_time_milliseconds = 0;
if (!ec)
{
boost::optional<std::string> min_write_time_text (request.get_optional<std::string> ("min_write_time"));
if (min_write_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_write_time_text, min_write_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
}
if (!ec)
{
node.store.serialize_mdb_tracker (json, std::chrono::milliseconds (min_read_time_milliseconds), std::chrono::milliseconds (min_write_time_milliseconds));
response_l.put_child ("txn_tracking", json);
}
}
else
{
ec = nano::error_common::tracking_not_enabled;
}
response_errors ();
}
void nano::json_handler::delegators ()
{
auto account (account_impl ());
if (!ec)
{
boost::property_tree::ptree delegators;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == account)
{
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
nano::account const & account (i->first);
delegators.put (account.to_account (), balance);
}
}
response_l.add_child ("delegators", delegators);
}
response_errors ();
}
void nano::json_handler::delegators_count ()
{
auto account (account_impl ());
if (!ec)
{
uint64_t count (0);
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == account)
{
++count;
}
}
response_l.put ("count", std::to_string (count));
}
response_errors ();
}
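/*
* Illustrative usage of deterministic_key (derived from the handler below; the seed and
* the derived keys shown are placeholders):
*   request:  { "action": "deterministic_key", "seed": "<64 hex characters>", "index": "0" }
*   response: { "private": "...", "public": "...", "account": "nano_..." }
*/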
void nano::json_handler::deterministic_key ()
{
std::string seed_text (request.get<std::string> ("seed"));
std::string index_text (request.get<std::string> ("index"));
nano::raw_key seed;
if (!seed.data.decode_hex (seed_text))
{
try
{
uint32_t index (std::stoul (index_text));
nano::private_key prv = nano::deterministic_key (seed, index);
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
catch (std::logic_error const &)
{
ec = nano::error_common::invalid_index;
}
}
else
{
ec = nano::error_common::bad_seed;
}
response_errors ();
}
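/*
* Background task started by the epoch_upgrade RPC. A rough outline of the flow below:
*   1. Collect accounts whose head block is still on an older epoch and publish one epoch
*      state block per account, batched in groups of upgrade_batch_size and bounded by
*      count_limit.
*   2. Walk the pending table and publish open-style epoch blocks for unopened accounts
*      that only have pending entries on the old epoch.
* Both phases repeat until nothing remains to upgrade or the node is stopped.
*/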
void epoch_upgrader (std::shared_ptr<nano::node> node_a, nano::private_key const & prv_a, nano::epoch epoch_a, uint64_t count_limit)
{
uint64_t const upgrade_batch_size = 1000;
nano::block_builder builder;
auto link (node_a->ledger.epoch_link (epoch_a));
nano::raw_key raw_key;
raw_key.data = prv_a;
auto signer (nano::pub_key (prv_a));
assert (signer == node_a->ledger.epoch_signer (link));
class account_upgrade_item final
{
public:
nano::account account{ 0 };
uint64_t modified{ 0 };
};
class account_tag
{
};
class modified_tag
{
};
boost::multi_index_container<
account_upgrade_item,
boost::multi_index::indexed_by<
boost::multi_index::ordered_non_unique<boost::multi_index::tag<modified_tag>, boost::multi_index::member<account_upgrade_item, uint64_t, &account_upgrade_item::modified>, std::greater<uint64_t>>,
boost::multi_index::hashed_unique<boost::multi_index::tag<account_tag>, boost::multi_index::member<account_upgrade_item, nano::account, &account_upgrade_item::account>>>>
accounts_list;
bool finished_upgrade (false);
while (!finished_upgrade && !node_a->stopped)
{
bool finished_accounts (false);
uint64_t total_upgraded_accounts (0);
while (!finished_accounts && count_limit != 0 && !node_a->stopped)
{
{
auto transaction (node_a->store.tx_begin_read ());
// Collect accounts to upgrade
for (auto i (node_a->store.latest_begin (transaction)), n (node_a->store.latest_end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info const & info (i->second);
if (info.epoch () < epoch_a)
{
release_assert (nano::epochs::is_sequential (info.epoch (), epoch_a));
accounts_list.insert (account_upgrade_item{ account, info.modified });
}
}
}
/* Upgrade accounts
Repeat until no accounts with the previous epoch remain in the latest table */
uint64_t upgraded_accounts (0);
for (auto i (accounts_list.get<modified_tag> ().begin ()), n (accounts_list.get<modified_tag> ().end ()); i != n && upgraded_accounts < upgrade_batch_size && upgraded_accounts < count_limit && !node_a->stopped; ++i)
{
auto transaction (node_a->store.tx_begin_read ());
nano::account_info info;
if (!node_a->store.account_get (transaction, i->account, info) && info.epoch () < epoch_a)
{
auto epoch = builder.state ()
.account (i->account)
.previous (info.head)
.representative (info.representative)
.balance (info.balance)
.link (link)
.sign (raw_key, signer)
.work (node_a->work_generate_blocking (info.head).value_or (0))
.build ();
bool valid_signature (!nano::validate_message (signer, epoch->hash (), epoch->block_signature ()));
bool valid_work (!nano::work_validate (*epoch.get ()));
nano::process_result result (nano::process_result::old);
if (valid_signature && valid_work)
{
result = node_a->process_local (std::move (epoch)).code;
}
if (result == nano::process_result::progress)
{
++upgraded_accounts;
}
else
{
bool fork (result == nano::process_result::fork);
node_a->logger.always_log (boost::str (boost::format ("Failed to upgrade account %1%. Valid signature: %2%. Valid work: %3%. Block processor fork: %4%") % i->account.to_account () % valid_signature % valid_work % fork));
}
}
}
total_upgraded_accounts += upgraded_accounts;
count_limit -= upgraded_accounts;
if (!accounts_list.empty ())
{
node_a->logger.always_log (boost::str (boost::format ("%1% accounts were upgraded to new epoch, %2% remain...") % total_upgraded_accounts % (accounts_list.size () - upgraded_accounts)));
accounts_list.clear ();
}
else
{
node_a->logger.always_log (boost::str (boost::format ("%1% total accounts were upgraded to new epoch") % total_upgraded_accounts));
finished_accounts = true;
}
}
// Pending blocks upgrade
bool finished_pending (false);
uint64_t total_upgraded_pending (0);
while (!finished_pending && count_limit != 0 && !node_a->stopped)
{
uint64_t upgraded_pending (0);
auto transaction (node_a->store.tx_begin_read ());
for (auto i (node_a->store.pending_begin (transaction, nano::pending_key (1, 0))), n (node_a->store.pending_end ()); i != n && upgraded_pending < upgrade_batch_size && upgraded_pending < count_limit && !node_a->stopped;)
{
bool to_next_account (false);
nano::pending_key const & key (i->first);
if (!node_a->store.account_exists (transaction, key.account))
{
nano::pending_info const & info (i->second);
if (info.epoch < epoch_a)
{
release_assert (nano::epochs::is_sequential (info.epoch, epoch_a));
auto epoch = builder.state ()
.account (key.account)
.previous (0)
.representative (0)
.balance (0)
.link (link)
.sign (raw_key, signer)
.work (node_a->work_generate_blocking (key.account).value_or (0))
.build ();
bool valid_signature (!nano::validate_message (signer, epoch->hash (), epoch->block_signature ()));
bool valid_work (!nano::work_validate (*epoch.get ()));
nano::process_result result (nano::process_result::old);
if (valid_signature && valid_work)
{
result = node_a->process_local (std::move (epoch)).code;
}
if (result == nano::process_result::progress)
{
++upgraded_pending;
to_next_account = true;
}
else
{
bool fork (result == nano::process_result::fork);
node_a->logger.always_log (boost::str (boost::format ("Failed to upgrade account with pending blocks %1%. Valid signature: %2%. Valid work: %3%. Block processor fork: %4%") % key.account.to_account () % valid_signature % valid_work % fork));
}
}
}
else
{
to_next_account = true;
}
if (to_next_account)
{
// Move to next account if pending account exists or was upgraded
if (key.account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
else
{
i = node_a->store.pending_begin (transaction, nano::pending_key (key.account.number () + 1, 0));
}
}
else
{
// Move to next pending item
++i;
}
}
total_upgraded_pending += upgraded_pending;
count_limit -= upgraded_pending;
// Repeat if some pending accounts were upgraded
if (upgraded_pending != 0)
{
node_a->logger.always_log (boost::str (boost::format ("%1% unopened accounts with pending blocks were upgraded to new epoch...") % total_upgraded_pending));
}
else
{
node_a->logger.always_log (boost::str (boost::format ("%1% total unopened accounts with pending blocks were upgraded to new epoch") % total_upgraded_pending));
finished_pending = true;
}
}
finished_upgrade = (total_upgraded_accounts == 0) && (total_upgraded_pending == 0);
}
node_a->logger.always_log ("Epoch upgrade is completed");
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::epoch_upgrade ()
{
nano::epoch epoch (nano::epoch::invalid);
uint8_t epoch_int (request.get<uint8_t> ("epoch"));
switch (epoch_int)
{
case 1:
epoch = nano::epoch::epoch_1;
break;
case 2:
epoch = nano::epoch::epoch_2;
break;
default:
break;
}
if (epoch != nano::epoch::invalid)
{
uint64_t count_limit (count_optional_impl ());
std::string key_text (request.get<std::string> ("key"));
nano::private_key prv;
if (!prv.decode_hex (key_text))
{
if (nano::pub_key (prv) == node.ledger.epoch_signer (node.ledger.epoch_link (epoch)))
{
auto node_l (node.shared ());
node.worker.push_task ([node_l, prv, epoch, count_limit]() {
epoch_upgrader (node_l, prv, epoch, count_limit);
});
response_l.put ("started", "1");
}
else
{
ec = nano::error_rpc::invalid_epoch_signer;
}
}
else
{
ec = nano::error_common::bad_private_key;
}
}
else
{
ec = nano::error_rpc::invalid_epoch;
}
response_errors ();
}
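/*
* Illustrative usage of frontiers (field names taken from the handler below; account and
* hash values are placeholders):
*   request:  { "action": "frontiers", "account": "nano_...", "count": "1" }
*   response: { "frontiers": { "nano_...": "<head block hash>" } }
*/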
void nano::json_handler::frontiers ()
{
auto start (account_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && frontiers.size () < count; ++i)
{
frontiers.put (i->first.to_account (), i->second.head.to_string ());
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::account_count ()
{
auto transaction (node.store.tx_begin_read ());
auto size (node.store.account_count (transaction));
response_l.put ("count", std::to_string (size));
response_errors ();
}
namespace
{
class history_visitor : public nano::block_visitor
{
public:
history_visitor (nano::json_handler & handler_a, bool raw_a, nano::transaction & transaction_a, boost::property_tree::ptree & tree_a, nano::block_hash const & hash_a, std::vector<nano::public_key> const & accounts_filter_a) :
handler (handler_a),
raw (raw_a),
transaction (transaction_a),
tree (tree_a),
hash (hash_a),
accounts_filter (accounts_filter_a)
{
}
virtual ~history_visitor () = default;
void send_block (nano::send_block const & block_a)
{
if (should_ignore_account (block_a.hashables.destination))
{
return;
}
tree.put ("type", "send");
auto account (block_a.hashables.destination.to_account ());
tree.put ("account", account);
auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
tree.put ("amount", amount);
if (raw)
{
tree.put ("destination", account);
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void receive_block (nano::receive_block const & block_a)
{
tree.put ("type", "receive");
auto account (handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
tree.put ("account", account);
auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
tree.put ("amount", amount);
if (raw)
{
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void open_block (nano::open_block const & block_a)
{
if (raw)
{
tree.put ("type", "open");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("opened", block_a.hashables.account.to_account ());
}
else
{
// Report opens as a receive
tree.put ("type", "receive");
}
if (block_a.hashables.source != network_params.ledger.genesis_account)
{
tree.put ("account", handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
tree.put ("amount", handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
}
else
{
tree.put ("account", network_params.ledger.genesis_account.to_account ());
tree.put ("amount", network_params.ledger.genesis_amount.convert_to<std::string> ());
}
}
void change_block (nano::change_block const & block_a)
{
if (raw && accounts_filter.empty ())
{
tree.put ("type", "change");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void state_block (nano::state_block const & block_a)
{
if (raw)
{
tree.put ("type", "state");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("link", block_a.hashables.link.to_string ());
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
auto balance (block_a.hashables.balance.number ());
auto previous_balance (handler.node.ledger.balance (transaction, block_a.hashables.previous));
if (balance < previous_balance)
{
if (should_ignore_account (block_a.hashables.link))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "send");
}
else
{
tree.put ("type", "send");
}
tree.put ("account", block_a.hashables.link.to_account ());
tree.put ("amount", (previous_balance - balance).convert_to<std::string> ());
}
else
{
if (block_a.hashables.link.is_zero ())
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "change");
}
}
else if (balance == previous_balance && handler.node.ledger.is_epoch_link (block_a.hashables.link))
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "epoch");
tree.put ("account", handler.node.ledger.epoch_signer (block_a.link ()).to_account ());
}
}
else
{
auto account (handler.node.ledger.account (transaction, block_a.hashables.link));
if (should_ignore_account (account))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "receive");
}
else
{
tree.put ("type", "receive");
}
tree.put ("account", account.to_account ());
tree.put ("amount", (balance - previous_balance).convert_to<std::string> ());
}
}
}
bool should_ignore_account (nano::public_key const & account)
{
bool ignore (false);
if (!accounts_filter.empty ())
{
if (std::find (accounts_filter.begin (), accounts_filter.end (), account) == accounts_filter.end ())
{
ignore = true;
}
}
return ignore;
}
nano::json_handler & handler;
bool raw;
nano::transaction & transaction;
boost::property_tree::ptree & tree;
nano::block_hash const & hash;
nano::network_params network_params;
std::vector<nano::public_key> const & accounts_filter;
};
}
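/*
* Illustrative usage of account_history (options taken from the handler below and the
* history_visitor above; values are placeholders). Either "account" or "head" selects the
* starting point; "raw" switches the per-block output to the low-level form.
*   request:  { "action": "account_history", "account": "nano_...", "count": "10",
*               "offset": "0", "reverse": "false", "raw": "false",
*               "account_filter": [ "nano_..." ] }
*   response: { "account": "nano_...",
*               "history": [ { "type": "send", "account": "nano_...", "amount": "...",
*                              "local_timestamp": "...", "height": "...", "hash": "..." } ],
*               "previous": "<hash to continue paging from>" }  // "next" when reverse is set
*/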
void nano::json_handler::account_history ()
{
std::vector<nano::public_key> accounts_to_filter;
const auto accounts_filter_node = request.get_child_optional ("account_filter");
if (accounts_filter_node.is_initialized ())
{
for (auto & a : (*accounts_filter_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
nano::account account;
nano::block_hash hash;
bool reverse (request.get_optional<bool> ("reverse") == true);
auto head_str (request.get_optional<std::string> ("head"));
auto transaction (node.store.tx_begin_read ());
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (head_str)
{
if (!hash.decode_hex (*head_str))
{
if (node.store.block_exists (transaction, hash))
{
account = node.ledger.account (transaction, hash);
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
else
{
account = account_impl ();
if (!ec)
{
if (reverse)
{
auto info (account_info_impl (transaction, account));
if (!ec)
{
hash = info.open_block;
}
}
else
{
hash = node.ledger.latest (transaction, account);
}
}
}
if (!ec)
{
boost::property_tree::ptree history;
bool output_raw (request.get_optional<bool> ("raw") == true);
response_l.put ("account", account.to_account ());
nano::block_sideband sideband;
auto block (node.store.block_get (transaction, hash, &sideband));
while (block != nullptr && count > 0)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
history_visitor visitor (*this, output_raw, transaction, entry, hash, accounts_to_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("local_timestamp", std::to_string (sideband.timestamp));
entry.put ("height", std::to_string (sideband.height));
entry.put ("hash", hash.to_string ());
if (output_raw)
{
entry.put ("work", nano::to_string_hex (block->block_work ()));
entry.put ("signature", block->block_signature ().to_string ());
}
history.push_back (std::make_pair ("", entry));
--count;
}
}
hash = reverse ? node.store.block_successor (transaction, hash) : block->previous ();
block = node.store.block_get (transaction, hash, &sideband);
}
response_l.add_child ("history", history);
if (!hash.is_zero ())
{
response_l.put (reverse ? "next" : "previous", hash.to_string ());
}
}
response_errors ();
}
void nano::json_handler::keepalive ()
{
if (!ec)
{
std::string address_text (request.get<std::string> ("address"));
std::string port_text (request.get<std::string> ("port"));
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.keepalive (address_text, port);
response_l.put ("started", "1");
}
else
{
ec = nano::error_common::invalid_port;
}
}
response_errors ();
}
void nano::json_handler::key_create ()
{
nano::keypair pair;
response_l.put ("private", pair.prv.data.to_string ());
response_l.put ("public", pair.pub.to_string ());
response_l.put ("account", pair.pub.to_account ());
response_errors ();
}
void nano::json_handler::key_expand ()
{
std::string key_text (request.get<std::string> ("key"));
nano::private_key prv;
if (!prv.decode_hex (key_text))
{
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_private_key;
}
response_errors ();
}
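/*
* A quick summary of the options handled by ledger () below: "account" (iteration start),
* "count", "threshold", "modified_since", plus the booleans "sorting", "representative",
* "weight" and "pending". With "sorting" the ledger is first collected and ordered by
* balance, descending; otherwise accounts are streamed straight from the store.
*/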
void nano::json_handler::ledger ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
nano::account start (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
const bool sorting = request.get<bool> ("sorting", false);
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
boost::property_tree::ptree accounts;
auto transaction (node.store.tx_begin_read ());
if (!ec && !sorting) // Simple
{
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && accounts.size () < count; ++i)
{
nano::account_info const & info (i->second);
if (info.modified >= modified_since && (pending || info.balance.number () >= threshold.number ()))
{
nano::account const & account (i->first);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
else if (!ec) // Sorting
{
std::vector<std::pair<nano::uint128_union, nano::account>> ledger_l;
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
nano::uint128_union balance (info.balance);
if (info.modified >= modified_since)
{
ledger_l.emplace_back (balance, i->first);
}
}
std::sort (ledger_l.begin (), ledger_l.end ());
std::reverse (ledger_l.begin (), ledger_l.end ());
nano::account_info info;
for (auto i (ledger_l.begin ()), n (ledger_l.end ()); i != n && accounts.size () < count; ++i)
{
node.store.account_get (transaction, i->second, info);
if (pending || info.balance.number () >= threshold.number ())
{
nano::account const & account (i->second);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
(i->first).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
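/*
* Unit conversion helpers. The ratio is supplied by the RPC dispatcher for the concrete
* action; the exact constants are defined elsewhere (10^30 raw per Mnano is the
* conventional figure, mentioned here only as a reminder). mnano_from_raw divides,
* mnano_to_raw multiplies and rejects results that wrap around 128 bits.
*/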
void nano::json_handler::mnano_from_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () / ratio);
response_l.put ("amount", result.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::mnano_to_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () * ratio);
if (result > amount.number ())
{
response_l.put ("amount", result.convert_to<std::string> ());
}
else
{
ec = nano::error_common::invalid_amount_big;
}
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id ()
{
if (!ec)
{
response_l.put ("private", node.node_id.prv.data.to_string ());
response_l.put ("public", node.node_id.pub.to_string ());
response_l.put ("as_account", node.node_id.pub.to_account ());
response_l.put ("node_id", node.node_id.pub.to_node_id ());
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id_delete ()
{
response_l.put ("deprecated", "1");
response_errors ();
}
void nano::json_handler::password_change ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
bool error (wallet->store.rekey (transaction, password_text));
rpc_l->response_l.put ("changed", error ? "0" : "1");
if (!error)
{
rpc_l->node.logger.try_log ("Wallet password changed");
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::password_enter ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
auto transaction (wallet->wallets.tx_begin_write ());
auto error (wallet->enter_password (transaction, password_text));
rpc_l->response_l.put ("valid", error ? "0" : "1");
}
rpc_l->response_errors ();
});
}
void nano::json_handler::password_valid (bool wallet_locked)
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
if (!wallet_locked)
{
response_l.put ("valid", valid ? "1" : "0");
}
else
{
response_l.put ("locked", valid ? "0" : "1");
}
}
response_errors ();
}
void nano::json_handler::peers ()
{
boost::property_tree::ptree peers_l;
const bool peer_details = request.get<bool> ("peer_details", false);
auto peers_list (node.network.list (std::numeric_limits<size_t>::max ()));
std::sort (peers_list.begin (), peers_list.end (), [](const auto & lhs, const auto & rhs) {
return lhs->get_endpoint () < rhs->get_endpoint ();
});
for (auto i (peers_list.begin ()), n (peers_list.end ()); i != n; ++i)
{
std::stringstream text;
auto channel (*i);
text << channel->to_string ();
if (peer_details)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("protocol_version", std::to_string (channel->get_network_version ()));
auto node_id_l (channel->get_node_id_optional ());
if (node_id_l.is_initialized ())
{
pending_tree.put ("node_id", node_id_l.get ().to_node_id ());
}
else
{
pending_tree.put ("node_id", "");
}
pending_tree.put ("type", channel->get_type () == nano::transport::transport_type::tcp ? "tcp" : "udp");
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), pending_tree));
}
else
{
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), boost::property_tree::ptree (std::to_string (channel->get_network_version ()))));
}
}
response_l.add_child ("peers", peers_l);
response_errors ();
}
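/*
* Illustrative usage of pending (field names taken from the handler below; values are
* placeholders). When neither "threshold", "source", "min_version" nor "sorting" is given
* the response is a plain list of block hashes; otherwise each hash maps to an amount or
* to a subtree with "amount", "source" and "min_version".
*   request:  { "action": "pending", "account": "nano_...", "count": "10",
*               "threshold": "0", "source": "true" }
*   response: { "blocks": { "<block hash>": { "amount": "...", "source": "nano_..." } } }
*/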
void nano::json_handler::pending ()
{
auto account (account_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !min_version && !sorting); // if simple, response is a list of hashes
if (!ec)
{
boost::property_tree::ptree peers_l;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (sorting && !simple)
{
if (source || min_version)
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("amount") > child2.second.template get<nano::uint128_t> ("amount");
});
}
else
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("") > child2.second.template get<nano::uint128_t> ("");
});
}
}
response_l.add_child ("blocks", peers_l);
}
response_errors ();
}
void nano::json_handler::pending_exists ()
{
auto hash (hash_impl ());
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
auto exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
}
exists = exists && (block_confirmed (node, transaction, block->hash (), include_active, include_only_confirmed));
response_l.put ("exists", exists ? "1" : "0");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::payment_begin ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
std::string id_text (rpc_l->request.get<std::string> ("wallet"));
nano::wallet_id id;
if (!id.decode_hex (id_text))
{
auto existing (rpc_l->node.wallets.items.find (id));
if (existing != rpc_l->node.wallets.items.end ())
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
std::shared_ptr<nano::wallet> wallet (existing->second);
if (wallet->store.valid_password (transaction))
{
nano::account account (0);
do
{
auto existing (wallet->free_accounts.begin ());
if (existing != wallet->free_accounts.end ())
{
account = *existing;
wallet->free_accounts.erase (existing);
if (wallet->store.find (transaction, account) == wallet->store.end ())
{
rpc_l->node.logger.always_log (boost::str (boost::format ("Transaction wallet %1% externally modified listing account %2% as free but no longer exists") % id.to_string () % account.to_account ()));
account.clear ();
}
else
{
auto block_transaction (rpc_l->node.store.tx_begin_read ());
if (!rpc_l->node.ledger.account_balance (block_transaction, account).is_zero ())
{
rpc_l->node.logger.always_log (boost::str (boost::format ("Skipping account %1% for use as a transaction account: non-zero balance") % account.to_account ()));
account.clear ();
}
}
}
else
{
account = wallet->deterministic_insert (transaction);
break;
}
} while (account.is_zero ());
if (!account.is_zero ())
{
rpc_l->response_l.put ("deprecated", "1");
rpc_l->response_l.put ("account", account.to_account ());
}
else
{
rpc_l->ec = nano::error_rpc::payment_unable_create_account;
}
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::wallet_not_found;
}
}
else
{
rpc_l->ec = nano::error_common::bad_wallet_number;
}
rpc_l->response_errors ();
});
}
void nano::json_handler::payment_init ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
wallet->init_free_accounts (transaction);
rpc_l->response_l.put ("deprecated", "1");
rpc_l->response_l.put ("status", "Ready");
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::payment_end ()
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
if (node.ledger.account_balance (block_transaction, account).is_zero ())
{
wallet->free_accounts.insert (account);
response_l.put ("deprecated", "1");
response_l.put ("ended", "1");
}
else
{
ec = nano::error_rpc::payment_account_balance;
}
}
}
response_errors ();
}
void nano::json_handler::payment_wait ()
{
std::string timeout_text (request.get<std::string> ("timeout"));
auto account (account_impl ());
auto amount (amount_impl ());
if (!ec)
{
uint64_t timeout;
if (!decode_unsigned (timeout_text, timeout))
{
{
auto observer (std::make_shared<nano::json_payment_observer> (node, response, account, amount));
observer->start (timeout);
node.payment_observer_processor.add (account, observer);
}
node.payment_observer_processor.observer_action (account);
}
else
{
ec = nano::error_rpc::bad_timeout;
}
}
if (ec)
{
response_errors ();
}
}
void nano::json_handler::process ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
const bool json_block_l = rpc_l->request.get<bool> ("json_block", false);
const bool watch_work_l = rpc_l->request.get<bool> ("watch_work", true);
std::shared_ptr<nano::block> block;
if (json_block_l)
{
block = rpc_l->block_json_impl (true);
}
else
{
block = rpc_l->block_impl (true);
}
// State blocks subtype check
if (!rpc_l->ec && block->type () == nano::block_type::state)
{
std::string subtype_text (rpc_l->request.get<std::string> ("subtype", ""));
if (!subtype_text.empty ())
{
std::shared_ptr<nano::state_block> block_state (std::static_pointer_cast<nano::state_block> (block));
auto transaction (rpc_l->node.store.tx_begin_read ());
if (!block_state->hashables.previous.is_zero () && !rpc_l->node.store.block_exists (transaction, block_state->hashables.previous))
{
rpc_l->ec = nano::error_process::gap_previous;
}
else
{
auto balance (rpc_l->node.ledger.account_balance (transaction, block_state->hashables.account));
if (subtype_text == "send")
{
if (balance <= block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// A send with previous == 0 fails the balance check, so no explicit previous != 0 check is required
}
else if (subtype_text == "receive")
{
if (balance > block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// A receive can point to an open block, so no previous != 0 check is required
}
else if (subtype_text == "open")
{
if (!block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "change")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "epoch")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (!rpc_l->node.ledger.is_epoch_link (block_state->hashables.link))
{
rpc_l->ec = nano::error_rpc::invalid_subtype_epoch_link;
}
}
else
{
rpc_l->ec = nano::error_rpc::invalid_subtype;
}
}
}
}
if (!rpc_l->ec)
{
if (!nano::work_validate (*block))
{
auto result (rpc_l->node.process_local (block, watch_work_l));
switch (result.code)
{
case nano::process_result::progress:
{
rpc_l->response_l.put ("hash", block->hash ().to_string ());
break;
}
case nano::process_result::gap_previous:
{
rpc_l->ec = nano::error_process::gap_previous;
break;
}
case nano::process_result::gap_source:
{
rpc_l->ec = nano::error_process::gap_source;
break;
}
case nano::process_result::old:
{
rpc_l->ec = nano::error_process::old;
break;
}
case nano::process_result::bad_signature:
{
rpc_l->ec = nano::error_process::bad_signature;
break;
}
case nano::process_result::negative_spend:
{
// TODO once we get RPC versioning, this should be changed to "negative spend"
rpc_l->ec = nano::error_process::negative_spend;
break;
}
case nano::process_result::balance_mismatch:
{
rpc_l->ec = nano::error_process::balance_mismatch;
break;
}
case nano::process_result::unreceivable:
{
rpc_l->ec = nano::error_process::unreceivable;
break;
}
case nano::process_result::block_position:
{
rpc_l->ec = nano::error_process::block_position;
break;
}
case nano::process_result::fork:
{
const bool force = rpc_l->request.get<bool> ("force", false);
if (force)
{
rpc_l->node.active.erase (*block);
rpc_l->node.block_processor.force (block);
rpc_l->response_l.put ("hash", block->hash ().to_string ());
}
else
{
rpc_l->ec = nano::error_process::fork;
}
break;
}
default:
{
rpc_l->ec = nano::error_process::other;
break;
}
}
}
else
{
rpc_l->ec = nano::error_blocks::work_low;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::receive ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
auto hash (hash_impl ("block"));
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
auto block_transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (block_transaction, hash));
if (block != nullptr)
{
if (node.store.pending_exists (block_transaction, nano::pending_key (account, hash)))
{
auto work (work_optional_impl ());
if (!ec && work)
{
nano::account_info info;
nano::root head;
if (!node.store.account_get (block_transaction, account, info))
{
head = info.head;
}
else
{
head = account;
}
if (nano::work_validate (head, work))
{
ec = nano::error_common::invalid_work;
}
}
else if (!ec) // && work == 0
{
if (!node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
}
if (!ec)
{
bool generate_work (work == 0); // Only generate work locally when no "work" value was supplied
auto response_a (response);
// clang-format off
wallet->receive_async(std::move(block), account, node.network_params.ledger.genesis_amount, [response_a](std::shared_ptr<nano::block> block_a) {
if (block_a != nullptr)
{
boost::property_tree::ptree response_l;
response_l.put("block", block_a->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, response_l);
response_a(ostream.str());
}
else
{
json_error_response(response_a, "Error generating block");
}
},
work, generate_work);
// clang-format on
}
}
else
{
ec = nano::error_process::unreceivable;
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
}
// The response is produced by the receive_async callback above; only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::receive_minimum ()
{
if (!ec)
{
response_l.put ("amount", node.config.receive_minimum.to_string_dec ());
}
response_errors ();
}
void nano::json_handler::receive_minimum_set ()
{
auto amount (amount_impl ());
if (!ec)
{
node.config.receive_minimum = amount;
response_l.put ("success", "");
}
response_errors ();
}
void nano::json_handler::representatives ()
{
auto count (count_optional_impl ());
if (!ec)
{
const bool sorting = request.get<bool> ("sorting", false);
boost::property_tree::ptree representatives;
auto rep_amounts = node.ledger.rep_weights.get_rep_amounts ();
if (!sorting) // Simple
{
// Copy into a std::map so iteration is ordered by account
std::map<nano::account, nano::uint128_t> ordered (rep_amounts.begin (), rep_amounts.end ());
for (auto & rep_amount : ordered)
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representatives.put (account.to_account (), amount.convert_to<std::string> ());
if (representatives.size () > count)
{
break;
}
}
}
else // Sorting
{
std::vector<std::pair<nano::uint128_t, std::string>> representation;
for (auto & rep_amount : rep_amounts)
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representation.emplace_back (amount, account.to_account ());
}
std::sort (representation.begin (), representation.end ());
std::reverse (representation.begin (), representation.end ());
for (auto i (representation.begin ()), n (representation.end ()); i != n && representatives.size () < count; ++i)
{
representatives.put (i->second, (i->first).convert_to<std::string> ());
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
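/*
* Illustrative usage of representatives_online (derived from the handler below; values are
* placeholders). With "weight" each representative maps to its voting weight, otherwise a
* plain list of accounts is returned; "accounts" restricts the output to the given set.
*   request:  { "action": "representatives_online", "weight": "true",
*               "accounts": [ "nano_..." ] }
*   response: { "representatives": { "nano_...": { "weight": "..." } } }
*/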
void nano::json_handler::representatives_online ()
{
const auto accounts_node = request.get_child_optional ("accounts");
const bool weight = request.get<bool> ("weight", false);
std::vector<nano::public_key> accounts_to_filter;
if (accounts_node.is_initialized ())
{
for (auto & a : (*accounts_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
if (!ec)
{
boost::property_tree::ptree representatives;
auto reps (node.online_reps.list ());
for (auto & i : reps)
{
if (accounts_node.is_initialized ())
{
if (accounts_to_filter.empty ())
{
break;
}
auto found_acc = std::find (accounts_to_filter.begin (), accounts_to_filter.end (), i);
if (found_acc == accounts_to_filter.end ())
{
continue;
}
else
{
accounts_to_filter.erase (found_acc);
}
}
if (weight)
{
boost::property_tree::ptree weight_node;
auto account_weight (node.ledger.weight (i));
weight_node.put ("weight", account_weight.convert_to<std::string> ());
representatives.add_child (i.to_account (), weight_node);
}
else
{
boost::property_tree::ptree entry;
entry.put ("", i.to_account ());
representatives.push_back (std::make_pair ("", entry));
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
void nano::json_handler::republish ()
{
auto count (count_optional_impl (1024U));
uint64_t sources (0);
uint64_t destinations (0);
boost::optional<std::string> sources_text (request.get_optional<std::string> ("sources"));
if (!ec && sources_text.is_initialized ())
{
if (decode_unsigned (sources_text.get (), sources))
{
ec = nano::error_rpc::invalid_sources;
}
}
boost::optional<std::string> destinations_text (request.get_optional<std::string> ("destinations"));
if (!ec && destinations_text.is_initialized ())
{
if (decode_unsigned (destinations_text.get (), destinations))
{
ec = nano::error_rpc::invalid_destinations;
}
}
auto hash (hash_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
std::deque<std::shared_ptr<nano::block>> republish_bundle;
for (auto i (0); !hash.is_zero () && i < count; ++i)
{
block = node.store.block_get (transaction, hash);
if (sources != 0) // Republish source chain
{
nano::block_hash source (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block_get (transaction, source));
std::vector<nano::block_hash> hashes;
while (block_a != nullptr && hashes.size () < sources)
{
hashes.push_back (source);
source = block_a->previous ();
block_a = node.store.block_get (transaction, source);
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash_l : hashes)
{
block_a = node.store.block_get (transaction, hash_l);
republish_bundle.push_back (std::move (block_a));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
republish_bundle.push_back (std::move (block)); // Republish block
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
if (destinations != 0) // Republish destination chain
{
auto block_b (node.store.block_get (transaction, hash));
auto destination (node.ledger.block_destination (transaction, *block_b));
if (!destination.is_zero ())
{
if (!node.store.pending_exists (transaction, nano::pending_key (destination, hash)))
{
nano::block_hash previous (node.ledger.latest (transaction, destination));
auto block_d (node.store.block_get (transaction, previous));
nano::block_hash source;
std::vector<nano::block_hash> hashes;
while (block_d != nullptr && hash != source)
{
hashes.push_back (previous);
source = node.ledger.block_source (transaction, *block_d);
previous = block_d->previous ();
block_d = node.store.block_get (transaction, previous);
}
std::reverse (hashes.begin (), hashes.end ());
if (hashes.size () > destinations)
{
hashes.resize (destinations);
}
for (auto & hash_l : hashes)
{
block_d = node.store.block_get (transaction, hash_l);
republish_bundle.push_back (std::move (block_d));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
}
}
hash = node.store.block_successor (transaction, hash);
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.put ("success", ""); // obsolete
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::search_pending ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto error (wallet->search_pending ());
response_l.put ("started", !error);
}
response_errors ();
}
void nano::json_handler::search_pending_all ()
{
if (!ec)
{
node.wallets.search_pending_all ();
response_l.put ("success", "");
}
response_errors ();
}
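/*
* Illustrative usage of send (field names taken from the handler below; values are
* placeholders). The reply is produced asynchronously by send_async's callback, so only
* validation errors are answered synchronously; "id" is passed through to send_async to
* deduplicate repeated requests and "work" supplies precomputed work for the source frontier.
*   request:  { "action": "send", "wallet": "<wallet id>", "source": "nano_...",
*               "destination": "nano_...", "amount": "1000000", "id": "unique-id-1" }
*   response: { "block": "<hash of the created send block>" }
*/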
void nano::json_handler::send ()
{
auto wallet (wallet_impl ());
auto amount (amount_impl ());
// Sending 0 amount is invalid with state blocks
if (!ec && amount.is_zero ())
{
ec = nano::error_common::invalid_amount;
}
std::string source_text (request.get<std::string> ("source"));
auto source (account_impl (source_text, nano::error_rpc::bad_source));
std::string destination_text (request.get<std::string> ("destination"));
auto destination (account_impl (destination_text, nano::error_rpc::bad_destination));
if (!ec)
{
auto work (work_optional_impl ());
nano::uint128_t balance (0);
if (!ec && work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, source);
auto info (account_info_impl (block_transaction, source));
if (!ec)
{
balance = (info.balance).number ();
}
if (!ec && work)
{
if (nano::work_validate (info.head, work))
{
ec = nano::error_common::invalid_work;
}
}
}
if (!ec)
{
bool generate_work (work == 0); // Only generate work locally when no "work" value was supplied
boost::optional<std::string> send_id (request.get_optional<std::string> ("id"));
auto response_a (response);
auto response_data (std::make_shared<boost::property_tree::ptree> (response_l));
// clang-format off
wallet->send_async(source, destination, amount.number(), [balance, amount, response_a, response_data](std::shared_ptr<nano::block> block_a) {
if (block_a != nullptr)
{
response_data->put("block", block_a->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, *response_data);
response_a(ostream.str());
}
else
{
if (balance >= amount.number())
{
json_error_response(response_a, "Error generating block");
}
else
{
std::error_code ec(nano::error_common::insufficient_balance);
json_error_response(response_a, ec.message());
}
}
},
work, generate_work, send_id);
// clang-format on
}
}
// The response is produced by the send_async callback above; only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::sign ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
// Retrieving hash
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
// Retrieving block
std::shared_ptr<nano::block> block;
boost::optional<std::string> block_text (request.get_optional<std::string> ("block"));
if (!ec && block_text.is_initialized ())
{
if (json_block_l)
{
block = block_json_impl (true);
}
else
{
block = block_impl (true);
}
if (block != nullptr)
{
hash = block->hash ();
}
}
// Neither a hash nor a block was provided
if (!ec && hash.is_zero ())
{
ec = nano::error_blocks::invalid_block;
}
// A bare hash was provided but signing arbitrary hashes is disabled in the node RPC config
else if (!ec && !hash.is_zero () && block == nullptr && !node_rpc_config.enable_sign_hash)
{
ec = nano::error_rpc::sign_hash_disabled;
}
if (!ec)
{
nano::raw_key prv;
prv.data.clear ();
// Retrieving private key from request
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (key_text.is_initialized ())
{
if (prv.data.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
else
{
// Retrieving private key from wallet
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (wallet_text.is_initialized () && account_text.is_initialized ())
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
wallet->store.fetch (transaction, account, prv);
}
}
}
}
// Signing
if (prv.data != 0)
{
nano::public_key pub (nano::pub_key (prv.as_private_key ()));
nano::signature signature (nano::sign_message (prv, pub, hash));
response_l.put ("signature", signature.to_string ());
if (block != nullptr)
{
block->signature_set (signature);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("block", contents);
}
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
response_errors ();
}
void nano::json_handler::stats ()
{
auto sink = node.stats.log_sink_json ();
std::string type (request.get<std::string> ("type", ""));
bool use_sink = false;
if (type == "counters")
{
node.stats.log_counters (*sink);
use_sink = true;
}
else if (type == "objects")
{
construct_json (collect_seq_con_info (node, "node").get (), response_l);
}
else if (type == "samples")
{
node.stats.log_samples (*sink);
use_sink = true;
}
else
{
ec = nano::error_rpc::invalid_missing_type;
}
if (!ec && use_sink)
{
auto stat_tree_l (*static_cast<boost::property_tree::ptree *> (sink->to_object ()));
stat_tree_l.put ("stat_duration_seconds", node.stats.last_reset ().count ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, stat_tree_l);
response (ostream.str ());
}
else
{
response_errors ();
}
}
void nano::json_handler::stats_clear ()
{
node.stats.clear ();
response_l.put ("success", "");
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
void nano::json_handler::stop ()
{
response_l.put ("success", "");
response_errors ();
if (!ec)
{
node.stop ();
stop_callback ();
}
}
void nano::json_handler::unchecked ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction)), n (node.store.unchecked_end ()); i != n && unchecked.size () < count; ++i)
{
nano::unchecked_info const & info (i->second);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
unchecked.add_child (info.block->hash ().to_string (), block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
unchecked.put (info.block->hash ().to_string (), contents);
}
}
response_l.add_child ("blocks", unchecked);
}
response_errors ();
}
void nano::json_handler::unchecked_clear ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto transaction (rpc_l->node.store.tx_begin_write ());
rpc_l->node.store.unchecked_clear (transaction);
rpc_l->response_l.put ("success", "");
rpc_l->response_errors ();
});
}
void nano::json_handler::unchecked_get ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction)), n (node.store.unchecked_end ()); i != n; ++i)
{
nano::unchecked_key const & key (i->first);
if (key.hash == hash)
{
nano::unchecked_info const & info (i->second);
response_l.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
response_l.put ("contents", contents);
}
break;
}
}
if (response_l.empty ())
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::unchecked_keys ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
nano::block_hash key (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("key"));
if (!ec && hash_text.is_initialized ())
{
if (key.decode_hex (hash_text.get ()))
{
ec = nano::error_rpc::bad_key;
}
}
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction, nano::unchecked_key (key, 0))), n (node.store.unchecked_end ()); i != n && unchecked.size () < count; ++i)
{
boost::property_tree::ptree entry;
nano::unchecked_info const & info (i->second);
entry.put ("key", i->first.key ().to_string ());
entry.put ("hash", info.block->hash ().to_string ());
entry.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
entry.put ("contents", contents);
}
unchecked.push_back (std::make_pair ("", entry));
}
response_l.add_child ("unchecked", unchecked);
}
response_errors ();
}
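/*
* unopened () scans the pending table starting from "account" (the burn account is skipped
* by default), sums pending amounts per destination account, skips accounts that already
* exist in the ledger, and reports every unopened account whose pending total reaches
* "threshold", up to "count" entries.
*/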
void nano::json_handler::unopened ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
nano::account start (1); // exclude burn account by default
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto iterator (node.store.pending_begin (transaction, nano::pending_key (start, 0)));
auto end (node.store.pending_end ());
nano::account current_account (start);
nano::uint128_t current_account_sum{ 0 };
boost::property_tree::ptree accounts;
while (iterator != end && accounts.size () < count)
{
nano::pending_key key (iterator->first);
nano::account account (key.account);
nano::pending_info info (iterator->second);
if (node.store.account_exists (transaction, account))
{
if (account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
// Skip existing accounts
iterator = node.store.pending_begin (transaction, nano::pending_key (account.number () + 1, 0));
}
else
{
if (account != current_account)
{
if (current_account_sum > 0)
{
if (current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
current_account_sum = 0;
}
current_account = account;
}
current_account_sum += info.amount.number ();
++iterator;
}
}
// last one after iterator reaches end
if (accounts.size () < count && current_account_sum > 0 && current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::uptime ()
{
response_l.put ("seconds", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - node.startup_time).count ());
response_errors ();
}
void nano::json_handler::version ()
{
response_l.put ("rpc_version", "1");
response_l.put ("store_version", std::to_string (node.store_version ()));
response_l.put ("protocol_version", std::to_string (node.network_params.protocol.protocol_version));
response_l.put ("node_vendor", boost::str (boost::format ("Nano %1%") % NANO_VERSION_STRING));
response_l.put ("network", node.network_params.network.get_current_network_as_string ());
response_l.put ("network_identifier", nano::genesis ().hash ().to_string ());
response_l.put ("build_info", BUILD_INFO);
response_errors ();
}
void nano::json_handler::validate_account_number ()
{
auto account (account_impl ());
(void)account;
response_l.put ("valid", ec ? "0" : "1");
ec = std::error_code (); // error is just invalid account
response_errors ();
}
void nano::json_handler::wallet_add ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string key_text (rpc_l->request.get<std::string> ("key"));
nano::raw_key key;
if (!key.data.decode_hex (key_text))
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
auto pub (wallet->insert_adhoc (key, generate_work));
if (!pub.is_zero ())
{
rpc_l->response_l.put ("account", pub.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_private_key;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_add_watch ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
for (auto & accounts : rpc_l->request.get_child ("accounts"))
{
auto account (rpc_l->account_impl (accounts.second.data ()));
if (!rpc_l->ec)
{
if (wallet->insert_watch (transaction, account))
{
rpc_l->ec = nano::error_common::bad_public_key;
}
}
}
if (!rpc_l->ec)
{
rpc_l->response_l.put ("success", "");
}
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_info ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::uint128_t balance (0);
nano::uint128_t pending (0);
uint64_t count (0);
uint64_t deterministic_count (0);
uint64_t adhoc_count (0);
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
balance = balance + node.ledger.account_balance (block_transaction, account);
pending = pending + node.ledger.account_pending (block_transaction, account);
nano::key_type key_type (wallet->store.key_type (i->second));
if (key_type == nano::key_type::deterministic)
{
deterministic_count++;
}
else if (key_type == nano::key_type::adhoc)
{
adhoc_count++;
}
count++;
}
uint32_t deterministic_index (wallet->store.deterministic_index_get (transaction));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("pending", pending.convert_to<std::string> ());
response_l.put ("accounts_count", std::to_string (count));
response_l.put ("deterministic_count", std::to_string (deterministic_count));
response_l.put ("adhoc_count", std::to_string (adhoc_count));
response_l.put ("deterministic_index", std::to_string (deterministic_index));
}
response_errors ();
}
void nano::json_handler::wallet_balances ()
{
auto wallet (wallet_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
boost::property_tree::ptree balances;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::uint128_t balance = node.ledger.account_balance (block_transaction, account);
if (balance >= threshold.number ())
{
boost::property_tree::ptree entry;
nano::uint128_t pending = node.ledger.account_pending (block_transaction, account);
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("pending", pending.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
}
response_errors ();
}
void nano::json_handler::wallet_change_seed ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string seed_text (rpc_l->request.get<std::string> ("seed"));
nano::raw_key seed;
if (!seed.data.decode_hex (seed_text))
{
auto count (static_cast<uint32_t> (rpc_l->count_optional_impl (0)));
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
nano::public_key account (wallet->change_seed (transaction, seed, count));
rpc_l->response_l.put ("success", "");
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_seed;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_contains ()
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto exists (wallet->store.find (transaction, account) != wallet->store.end ());
response_l.put ("exists", exists ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
nano::raw_key seed;
auto seed_text (rpc_l->request.get_optional<std::string> ("seed"));
if (seed_text.is_initialized () && seed.data.decode_hex (seed_text.get ()))
{
rpc_l->ec = nano::error_common::bad_seed;
}
if (!rpc_l->ec)
{
auto wallet_id = random_wallet_id ();
auto wallet (rpc_l->node.wallets.create (wallet_id));
auto existing (rpc_l->node.wallets.items.find (wallet_id));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->response_l.put ("wallet", wallet_id.to_string ());
}
else
{
rpc_l->ec = nano::error_common::wallet_lmdb_max_dbs;
}
if (!rpc_l->ec && seed_text.is_initialized ())
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
nano::public_key account (wallet->change_seed (transaction, seed));
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_destroy ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
std::string wallet_text (rpc_l->request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
auto existing (rpc_l->node.wallets.items.find (wallet));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->node.wallets.destroy (wallet);
bool destroyed (rpc_l->node.wallets.items.find (wallet) == rpc_l->node.wallets.items.end ());
rpc_l->response_l.put ("destroyed", destroyed ? "1" : "0");
}
else
{
rpc_l->ec = nano::error_common::wallet_not_found;
}
}
else
{
rpc_l->ec = nano::error_common::bad_wallet_number;
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_export ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
std::string json;
wallet->store.serialize_json (transaction, json);
response_l.put ("json", json);
}
response_errors ();
}
void nano::json_handler::wallet_frontiers ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::wallet_history ()
{
uint64_t modified_since (1);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
auto wallet (wallet_impl ());
if (!ec)
{
std::multimap<uint64_t, boost::property_tree::ptree, std::greater<uint64_t>> entries;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account_get (block_transaction, account, info))
{
auto timestamp (info.modified);
auto hash (info.head);
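				// Walk this account's chain backwards from the head while block timestamps remain at or above modified_since.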
while (timestamp >= modified_since && !hash.is_zero ())
{
nano::block_sideband sideband;
auto block (node.store.block_get (block_transaction, hash, &sideband));
timestamp = sideband.timestamp;
if (block != nullptr && timestamp >= modified_since)
{
boost::property_tree::ptree entry;
std::vector<nano::public_key> no_filter;
history_visitor visitor (*this, false, block_transaction, entry, hash, no_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("block_account", account.to_account ());
entry.put ("hash", hash.to_string ());
entry.put ("local_timestamp", std::to_string (timestamp));
entries.insert (std::make_pair (timestamp, entry));
}
hash = block->previous ();
}
else
{
hash.clear ();
}
}
}
}
boost::property_tree::ptree history;
for (auto i (entries.begin ()), n (entries.end ()); i != n; ++i)
{
history.push_back (std::make_pair ("", i->second));
}
response_l.add_child ("history", history);
}
response_errors ();
}
void nano::json_handler::wallet_key_valid ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
response_l.put ("valid", valid ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_ledger ()
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
modified_since = strtoul (modified_since_text.get ().c_str (), NULL, 10);
}
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account_get (block_transaction, account, info))
{
if (info.modified >= modified_since)
{
boost::property_tree::ptree entry;
entry.put ("frontier", info.head.to_string ());
entry.put ("open_block", info.open_block.to_string ());
entry.put ("representative_block", node.ledger.representative (block_transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
entry.put ("balance", balance);
entry.put ("modified_timestamp", std::to_string (info.modified));
entry.put ("block_count", std::to_string (info.block_count));
if (representative)
{
entry.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
entry.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (block_transaction, account));
entry.put ("pending", account_pending.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), entry));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::wallet_lock ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::raw_key empty;
empty.data.clear ();
wallet->store.password.value_set (empty);
response_l.put ("locked", "1");
node.logger.try_log ("Wallet locked");
}
response_errors ();
}
void nano::json_handler::wallet_pending ()
{
auto wallet (wallet_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
if (!ec)
{
boost::property_tree::ptree pending;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
boost::property_tree::ptree peers_l;
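			// Collect up to 'count' pending entries for this wallet account, filtered by threshold and confirmation options.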
for (auto ii (node.store.pending_begin (block_transaction, nano::pending_key (account, 0))); nano::pending_key (ii->first).account == account && peers_l.size () < count; ++ii)
{
nano::pending_key key (ii->first);
if (block_confirmed (node, block_transaction, key.hash, include_active, include_only_confirmed))
{
if (threshold.is_zero () && !source)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info info (ii->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (!peers_l.empty ())
{
pending.add_child (account.to_account (), peers_l);
}
}
response_l.add_child ("blocks", pending);
}
response_errors ();
}
void nano::json_handler::wallet_representative ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
response_l.put ("representative", wallet->store.representative (transaction).to_account ());
}
response_errors ();
}
void nano::json_handler::wallet_representative_set ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
bool update_existing_accounts (rpc_l->request.get<bool> ("update_existing_accounts", false));
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction) || !update_existing_accounts)
{
wallet->store.representative_set (transaction, representative);
rpc_l->response_l.put ("set", "1");
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
// Change representative for all wallet accounts
if (!rpc_l->ec && update_existing_accounts)
{
std::vector<nano::account> accounts;
{
auto transaction (rpc_l->node.wallets.tx_begin_read ());
auto block_transaction (rpc_l->node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!rpc_l->node.store.account_get (block_transaction, account, info))
{
if (info.representative != representative)
{
accounts.push_back (account);
}
}
}
}
for (auto & account : accounts)
{
// clang-format off
wallet->change_async(account, representative, [](std::shared_ptr<nano::block>) {}, 0, false);
// clang-format on
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_republish ()
{
auto wallet (wallet_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
std::deque<std::shared_ptr<nano::block>> republish_bundle;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
std::shared_ptr<nano::block> block;
std::vector<nano::block_hash> hashes;
while (!latest.is_zero () && hashes.size () < count)
{
hashes.push_back (latest);
block = node.store.block_get (block_transaction, latest);
latest = block->previous ();
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash : hashes)
{
block = node.store.block_get (block_transaction, hash);
republish_bundle.push_back (std::move (block));
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
void nano::json_handler::wallet_seed ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
if (wallet->store.valid_password (transaction))
{
nano::raw_key seed;
wallet->store.seed (seed, transaction);
response_l.put ("seed", seed.data.to_string ());
}
else
{
ec = nano::error_common::wallet_locked;
}
}
response_errors ();
}
void nano::json_handler::wallet_work_get ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree works;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
works.put (account.to_account (), nano::to_string_hex (work));
}
response_l.add_child ("works", works);
}
response_errors ();
}
void nano::json_handler::work_generate ()
{
boost::optional<nano::account> account;
auto account_opt (request.get_optional<std::string> ("account"));
if (account_opt.is_initialized ())
{
account = account_impl (account_opt.get ());
}
if (!ec)
{
auto hash (hash_impl ());
auto difficulty (difficulty_optional_impl ());
multiplier_optional_impl (difficulty);
if (!ec && (difficulty > node.config.max_work_generate_difficulty || difficulty < node.network_params.network.publish_threshold))
{
ec = nano::error_rpc::difficulty_limit;
}
if (!ec)
{
auto use_peers (request.get<bool> ("use_peers", false));
auto rpc_l (shared_from_this ());
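			// Completion handler: on success, assemble and send the JSON response with work, difficulty and multiplier; on cancellation, reply with an error.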
auto callback = [rpc_l, hash, this](boost::optional<uint64_t> const & work_a) {
if (work_a)
{
boost::property_tree::ptree response_l;
response_l.put ("hash", hash.to_string ());
uint64_t work (work_a.value ());
response_l.put ("work", nano::to_string_hex (work));
std::stringstream ostream;
uint64_t result_difficulty;
nano::work_validate (hash, work, &result_difficulty);
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, this->node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (result_multiplier));
boost::property_tree::write_json (ostream, response_l);
rpc_l->response (ostream.str ());
}
else
{
json_error_response (rpc_l->response, "Cancelled");
}
};
if (!use_peers)
{
if (node.local_work_generation_enabled ())
{
node.work.generate (hash, callback, difficulty);
}
else
{
ec = nano::error_common::disabled_local_work_generation;
}
}
else
{
if (!account_opt.is_initialized ())
{
// Fetch account from block if not given
auto transaction_l (node.store.tx_begin_read ());
if (node.store.block_exists (transaction_l, hash))
{
account = node.store.block_account (transaction_l, hash);
}
}
auto secondary_work_peers_l (request.get<bool> ("secondary_work_peers", false));
auto const & peers_l (secondary_work_peers_l ? node.config.secondary_work_peers : node.config.work_peers);
if (node.work_generation_enabled (peers_l))
{
node.work_generate (hash, callback, difficulty, account, secondary_work_peers_l);
}
else
{
ec = nano::error_common::disabled_work_generation;
}
}
}
}
	// Successful or cancelled work generation replies from the callback above; respond here only if an error was set synchronously.
if (ec)
{
response_errors ();
}
}
void nano::json_handler::work_cancel ()
{
auto hash (hash_impl ());
if (!ec)
{
node.observers.work_cancel.notify (hash);
}
response_errors ();
}
void nano::json_handler::work_get ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
response_l.put ("work", nano::to_string_hex (work));
}
}
response_errors ();
}
void nano::json_handler::work_set ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.work_put (transaction, account, work);
rpc_l->response_l.put ("success", "");
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::work_validate ()
{
auto hash (hash_impl ());
auto work (work_optional_impl ());
auto difficulty (difficulty_optional_impl ());
multiplier_optional_impl (difficulty);
if (!ec)
{
uint64_t result_difficulty (0);
nano::work_validate (hash, work, &result_difficulty);
response_l.put ("valid", (result_difficulty >= difficulty) ? "1" : "0");
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (result_multiplier));
}
response_errors ();
}
void nano::json_handler::work_peer_add ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.config.work_peers.push_back (std::make_pair (address_text, port));
response_l.put ("success", "");
}
else
{
ec = nano::error_common::invalid_port;
}
response_errors ();
}
void nano::json_handler::work_peers ()
{
boost::property_tree::ptree work_peers_l;
for (auto i (node.config.work_peers.begin ()), n (node.config.work_peers.end ()); i != n; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", boost::str (boost::format ("%1%:%2%") % i->first % i->second));
work_peers_l.push_back (std::make_pair ("", entry));
}
response_l.add_child ("work_peers", work_peers_l);
response_errors ();
}
void nano::json_handler::work_peers_clear ()
{
node.config.work_peers.clear ();
response_l.put ("success", "");
response_errors ();
}
namespace
{
void construct_json (nano::seq_con_info_component * component, boost::property_tree::ptree & parent)
{
// We are a leaf node, print name and exit
if (!component->is_composite ())
{
auto & leaf_info = static_cast<nano::seq_con_info_leaf *> (component)->get_info ();
boost::property_tree::ptree child;
child.put ("count", leaf_info.count);
child.put ("size", leaf_info.count * leaf_info.sizeof_element);
parent.add_child (leaf_info.name, child);
return;
}
auto composite = static_cast<nano::seq_con_info_composite *> (component);
boost::property_tree::ptree current;
for (auto & child : composite->get_children ())
{
construct_json (child.get (), current);
}
parent.add_child (composite->get_name (), current);
}
// Any RPC handlers which require no arguments (excluding default arguments) should go here.
// This avoids large if/else chains, which some compilers limit (MSVC, for instance, allows at most 128).
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ()
{
ipc_json_handler_no_arg_func_map no_arg_funcs;
no_arg_funcs.emplace ("account_balance", &nano::json_handler::account_balance);
no_arg_funcs.emplace ("account_block_count", &nano::json_handler::account_block_count);
no_arg_funcs.emplace ("account_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("account_create", &nano::json_handler::account_create);
no_arg_funcs.emplace ("account_get", &nano::json_handler::account_get);
no_arg_funcs.emplace ("account_history", &nano::json_handler::account_history);
no_arg_funcs.emplace ("account_info", &nano::json_handler::account_info);
no_arg_funcs.emplace ("account_key", &nano::json_handler::account_key);
no_arg_funcs.emplace ("account_list", &nano::json_handler::account_list);
no_arg_funcs.emplace ("account_move", &nano::json_handler::account_move);
no_arg_funcs.emplace ("account_remove", &nano::json_handler::account_remove);
no_arg_funcs.emplace ("account_representative", &nano::json_handler::account_representative);
no_arg_funcs.emplace ("account_representative_set", &nano::json_handler::account_representative_set);
no_arg_funcs.emplace ("account_weight", &nano::json_handler::account_weight);
no_arg_funcs.emplace ("accounts_balances", &nano::json_handler::accounts_balances);
no_arg_funcs.emplace ("accounts_create", &nano::json_handler::accounts_create);
no_arg_funcs.emplace ("accounts_frontiers", &nano::json_handler::accounts_frontiers);
no_arg_funcs.emplace ("accounts_pending", &nano::json_handler::accounts_pending);
no_arg_funcs.emplace ("active_difficulty", &nano::json_handler::active_difficulty);
no_arg_funcs.emplace ("available_supply", &nano::json_handler::available_supply);
no_arg_funcs.emplace ("block_info", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block_confirm", &nano::json_handler::block_confirm);
no_arg_funcs.emplace ("blocks", &nano::json_handler::blocks);
no_arg_funcs.emplace ("blocks_info", &nano::json_handler::blocks_info);
no_arg_funcs.emplace ("block_account", &nano::json_handler::block_account);
no_arg_funcs.emplace ("block_count", &nano::json_handler::block_count);
no_arg_funcs.emplace ("block_count_type", &nano::json_handler::block_count_type);
no_arg_funcs.emplace ("block_create", &nano::json_handler::block_create);
no_arg_funcs.emplace ("block_hash", &nano::json_handler::block_hash);
no_arg_funcs.emplace ("bootstrap", &nano::json_handler::bootstrap);
no_arg_funcs.emplace ("bootstrap_any", &nano::json_handler::bootstrap_any);
no_arg_funcs.emplace ("bootstrap_lazy", &nano::json_handler::bootstrap_lazy);
no_arg_funcs.emplace ("bootstrap_status", &nano::json_handler::bootstrap_status);
no_arg_funcs.emplace ("confirmation_active", &nano::json_handler::confirmation_active);
no_arg_funcs.emplace ("confirmation_height_currently_processing", &nano::json_handler::confirmation_height_currently_processing);
no_arg_funcs.emplace ("confirmation_history", &nano::json_handler::confirmation_history);
no_arg_funcs.emplace ("confirmation_info", &nano::json_handler::confirmation_info);
no_arg_funcs.emplace ("confirmation_quorum", &nano::json_handler::confirmation_quorum);
no_arg_funcs.emplace ("database_txn_tracker", &nano::json_handler::database_txn_tracker);
no_arg_funcs.emplace ("delegators", &nano::json_handler::delegators);
no_arg_funcs.emplace ("delegators_count", &nano::json_handler::delegators_count);
no_arg_funcs.emplace ("deterministic_key", &nano::json_handler::deterministic_key);
no_arg_funcs.emplace ("epoch_upgrade", &nano::json_handler::epoch_upgrade);
no_arg_funcs.emplace ("frontiers", &nano::json_handler::frontiers);
no_arg_funcs.emplace ("frontier_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("keepalive", &nano::json_handler::keepalive);
no_arg_funcs.emplace ("key_create", &nano::json_handler::key_create);
no_arg_funcs.emplace ("key_expand", &nano::json_handler::key_expand);
no_arg_funcs.emplace ("ledger", &nano::json_handler::ledger);
no_arg_funcs.emplace ("node_id", &nano::json_handler::node_id);
no_arg_funcs.emplace ("node_id_delete", &nano::json_handler::node_id_delete);
no_arg_funcs.emplace ("password_change", &nano::json_handler::password_change);
no_arg_funcs.emplace ("password_enter", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("wallet_unlock", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("payment_begin", &nano::json_handler::payment_begin);
no_arg_funcs.emplace ("payment_init", &nano::json_handler::payment_init);
no_arg_funcs.emplace ("payment_end", &nano::json_handler::payment_end);
no_arg_funcs.emplace ("payment_wait", &nano::json_handler::payment_wait);
no_arg_funcs.emplace ("peers", &nano::json_handler::peers);
no_arg_funcs.emplace ("pending", &nano::json_handler::pending);
no_arg_funcs.emplace ("pending_exists", &nano::json_handler::pending_exists);
no_arg_funcs.emplace ("process", &nano::json_handler::process);
no_arg_funcs.emplace ("receive", &nano::json_handler::receive);
no_arg_funcs.emplace ("receive_minimum", &nano::json_handler::receive_minimum);
no_arg_funcs.emplace ("receive_minimum_set", &nano::json_handler::receive_minimum_set);
no_arg_funcs.emplace ("representatives", &nano::json_handler::representatives);
no_arg_funcs.emplace ("representatives_online", &nano::json_handler::representatives_online);
no_arg_funcs.emplace ("republish", &nano::json_handler::republish);
no_arg_funcs.emplace ("search_pending", &nano::json_handler::search_pending);
no_arg_funcs.emplace ("search_pending_all", &nano::json_handler::search_pending_all);
no_arg_funcs.emplace ("send", &nano::json_handler::send);
no_arg_funcs.emplace ("sign", &nano::json_handler::sign);
no_arg_funcs.emplace ("stats", &nano::json_handler::stats);
no_arg_funcs.emplace ("stats_clear", &nano::json_handler::stats_clear);
no_arg_funcs.emplace ("stop", &nano::json_handler::stop);
no_arg_funcs.emplace ("unchecked", &nano::json_handler::unchecked);
no_arg_funcs.emplace ("unchecked_clear", &nano::json_handler::unchecked_clear);
no_arg_funcs.emplace ("unchecked_get", &nano::json_handler::unchecked_get);
no_arg_funcs.emplace ("unchecked_keys", &nano::json_handler::unchecked_keys);
no_arg_funcs.emplace ("unopened", &nano::json_handler::unopened);
no_arg_funcs.emplace ("uptime", &nano::json_handler::uptime);
no_arg_funcs.emplace ("validate_account_number", &nano::json_handler::validate_account_number);
no_arg_funcs.emplace ("version", &nano::json_handler::version);
no_arg_funcs.emplace ("wallet_add", &nano::json_handler::wallet_add);
no_arg_funcs.emplace ("wallet_add_watch", &nano::json_handler::wallet_add_watch);
no_arg_funcs.emplace ("wallet_balances", &nano::json_handler::wallet_balances);
no_arg_funcs.emplace ("wallet_change_seed", &nano::json_handler::wallet_change_seed);
no_arg_funcs.emplace ("wallet_contains", &nano::json_handler::wallet_contains);
no_arg_funcs.emplace ("wallet_create", &nano::json_handler::wallet_create);
no_arg_funcs.emplace ("wallet_destroy", &nano::json_handler::wallet_destroy);
no_arg_funcs.emplace ("wallet_export", &nano::json_handler::wallet_export);
no_arg_funcs.emplace ("wallet_frontiers", &nano::json_handler::wallet_frontiers);
no_arg_funcs.emplace ("wallet_history", &nano::json_handler::wallet_history);
no_arg_funcs.emplace ("wallet_info", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_balance_total", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_key_valid", &nano::json_handler::wallet_key_valid);
no_arg_funcs.emplace ("wallet_ledger", &nano::json_handler::wallet_ledger);
no_arg_funcs.emplace ("wallet_lock", &nano::json_handler::wallet_lock);
no_arg_funcs.emplace ("wallet_pending", &nano::json_handler::wallet_pending);
no_arg_funcs.emplace ("wallet_representative", &nano::json_handler::wallet_representative);
no_arg_funcs.emplace ("wallet_representative_set", &nano::json_handler::wallet_representative_set);
no_arg_funcs.emplace ("wallet_republish", &nano::json_handler::wallet_republish);
no_arg_funcs.emplace ("wallet_work_get", &nano::json_handler::wallet_work_get);
no_arg_funcs.emplace ("work_generate", &nano::json_handler::work_generate);
no_arg_funcs.emplace ("work_cancel", &nano::json_handler::work_cancel);
no_arg_funcs.emplace ("work_get", &nano::json_handler::work_get);
no_arg_funcs.emplace ("work_set", &nano::json_handler::work_set);
no_arg_funcs.emplace ("work_validate", &nano::json_handler::work_validate);
no_arg_funcs.emplace ("work_peer_add", &nano::json_handler::work_peer_add);
no_arg_funcs.emplace ("work_peers", &nano::json_handler::work_peers);
no_arg_funcs.emplace ("work_peers_clear", &nano::json_handler::work_peers_clear);
return no_arg_funcs;
}
/** Due to the asynchronous nature of updating confirmation heights, it can also be necessary to check active roots */
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed)
{
bool is_confirmed = false;
if (include_active && !include_only_confirmed)
{
is_confirmed = true;
}
// Check whether the confirmation height is set
else if (node.ledger.block_confirmed (transaction, hash))
{
is_confirmed = true;
}
// This just checks it's not currently undergoing an active transaction
else if (!include_only_confirmed)
{
auto block (node.store.block_get (transaction, hash));
is_confirmed = (block != nullptr && !node.active.active (*block));
}
return is_confirmed;
}
const char * epoch_as_string (nano::epoch epoch)
{
switch (epoch)
{
case nano::epoch::epoch_2:
return "2";
case nano::epoch::epoch_1:
return "1";
default:
return "0";
}
}
}
| 1 | 16,046 | json_handler::receive () has already started a read tx that can be used here right? | nanocurrency-nano-node | cpp |
@@ -84,6 +84,9 @@ describe( 'CompatibilityChecks', () => {
it( 'should make API requests to "setup-checks, health-checks and AMP Project test URL', async () => {
const token = 'test-token-value';
+ global._googlesitekitBaseData = global._googlesitekitBaseData || {};
+ global._googlesitekitBaseData[ 'isWP5.0+' ] = true;
+
// Mock request to setup-tag.
fetchMock.postOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/, | 1 | /**
* CompatibilityChecks component tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import CompatibilityChecks, { AMP_PROJECT_TEST_URL } from './compatibility-checks';
import { render, waitFor } from '../../../../tests/js/test-utils';
import { Fragment } from 'react';
import { muteFetch } from '../../../../tests/js/utils';
const compatibilityChildren = ( { complete, inProgressFeedback, CTAFeedback } ) => (
<Fragment>
{ CTAFeedback }
{ complete }
{ inProgressFeedback }
</Fragment>
);
describe( 'CompatibilityChecks', () => {
beforeEach( () => {
// Mock global.location.hostname with value that won't throw error in first check.
Object.defineProperty( global.window, 'location', {
value: {
hostname: 'validurl',
},
writable: true,
} );
} );
it( 'should initially display "Checking Compatibility..." message', async () => {
muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/ );
muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/developer-plugin/ );
muteFetch( { query: { tagverify: '1' } } );
const { container } = render(
<CompatibilityChecks>
{ compatibilityChildren }
</CompatibilityChecks>
);
expect( container ).toHaveTextContent( 'Checking Compatibility…' );
} );
it( 'should display "Your site may not be ready for Site Kit" if a check throws an error', async () => {
// Mock request to setup-tag
fetchMock.postOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/,
{ body: {}, status: 500 }
);
		// Mock request to developer-plugin when an error is thrown.
muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/developer-plugin/ );
const { container } = render(
<CompatibilityChecks>
{ compatibilityChildren }
</CompatibilityChecks>
);
await waitFor( () => {
expect( fetchMock ).toHaveFetchedTimes( 2 );
} );
		// Expect the error notice to be displayed and the error to have been logged.
expect( container ).toHaveTextContent( 'Your site may not be ready for Site Kit' );
expect( console ).toHaveErrored();
} );
it( 'should make API requests to "setup-checks, health-checks and AMP Project test URL', async () => {
const token = 'test-token-value';
// Mock request to setup-tag.
fetchMock.postOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/,
{ body: { token }, status: 200 }
);
// Mock request to health-checks.
fetchMock.getOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/health-checks/,
{ body: { checks: { googleAPI: { pass: true } } }, status: 200 }
);
// Mock request to AMP project.
muteFetch( AMP_PROJECT_TEST_URL );
// Mock getExistingTag request.
fetchMock.get(
{ query: { tagverify: '1' } },
{
body: `<html><head><meta name="googlesitekit-setup" content="${ token }"/></head><body></body>`,
status: 200,
}
);
render(
<CompatibilityChecks>
{ compatibilityChildren }
</CompatibilityChecks>
);
await waitFor( () => {
expect( fetchMock ).toHaveFetchedTimes( 4 );
} );
// Expect to have made requests to the setup-checks and health-checks endpoints and the AMP Project test URL.
expect( fetchMock ).toHaveFetched( /^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/ );
expect( fetchMock ).toHaveFetched( /^\/google-site-kit\/v1\/core\/site\/data\/health-checks/ );
expect( fetchMock ).toHaveFetched( AMP_PROJECT_TEST_URL );
} );
it( 'should not contain incomplete or error messages if checks are successful', async () => {
const token = 'test-token-value';
// Mock request to setup-tag.
fetchMock.postOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/setup-tag/,
{ body: { token }, status: 200 }
);
// Mock request to health-checks.
fetchMock.getOnce(
/^\/google-site-kit\/v1\/core\/site\/data\/health-checks/,
{ body: { checks: { googleAPI: { pass: true } } }, status: 200 }
);
// Mock request to AMP project.
muteFetch( AMP_PROJECT_TEST_URL );
// Mock getExistingTag request
fetchMock.get(
{ query: { tagverify: '1' } },
{
body: `<html><head><meta name="googlesitekit-setup" content="${ token }"/></head><body></body>`,
status: 200,
}
);
const { container } = render(
<CompatibilityChecks>
{ compatibilityChildren }
</CompatibilityChecks>
);
await waitFor( () => {
expect( fetchMock ).toHaveFetchedTimes( 4 );
} );
// Expect neither error nor incomplete text to be displayed.
expect( container ).not.toHaveTextContent( 'Your site may not be ready for Site Kit' );
expect( container ).not.toHaveTextContent( 'Checking Compatibility…' );
} );
} );
| 1 | 33,987 | We should add the definition to `.storybook/config.js` as well. | google-site-kit-wp | js |
@@ -96,7 +96,7 @@ ex_expr::exp_return_type ex_branch_clause::eval(char *op_data[],
switch (getOperType())
{
case ITM_AND:
- if (*(Lng32 *)op_data[1] == 0)
+ if (*(Lng32 *)op_data[1] == 0 || *(Lng32 *)op_data[1] == -1) // null treated as false
{
*(Lng32 *)op_data[0] = 0;
setNextClause(branch_clause); | 1 | /* -*-C++-*-
*****************************************************************************
*
* File: <file>
* Description:
*
*
* Created: 7/10/95
* Language: C++
*
*
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
*
*
*****************************************************************************
*/
#include "Platform.h"
#include <stddef.h>
#include "exp_stdh.h"
#include "exp_clause_derived.h"
ex_expr::exp_return_type ex_bool_clause::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
// boolean values: 0 = false, 1 = true, -1 = null
switch (getOperType())
{
case ITM_AND:
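      // Three-valued AND: false AND x = false; true AND x = x; null AND false = false, otherwise null AND x = null.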
if ((*(Lng32 *)op_data[1] == -1) &&
(*(Lng32 *)op_data[2] != 0))
*(Lng32 *)op_data[0] = -1;
else
if (*(Lng32 *)op_data[1] == 0)
*(Lng32 *)op_data[0] = 0;
else
*(Lng32 *)op_data[0] = *(Lng32 *)op_data[2];
break;
case ITM_OR:
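      // Three-valued OR: true OR x = true; false OR x = x; null OR true = true, otherwise null OR x = null.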
if ((*(Lng32 *)op_data[1] == -1) &&
(*(Lng32 *)op_data[2] != 1))
*(Lng32 *)op_data[0] = -1;
else
if (*(Lng32 *)op_data[1] == 1)
*(Lng32 *)op_data[0] = 1;
else
*(Lng32 *)op_data[0] = *(Lng32 *)op_data[2];
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
retcode = ex_expr::EXPR_ERROR;
break;
}
return retcode;
}
///////////////////////////////////////////////////////////////
// class ex_branch_clause
///////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_branch_clause::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
// boolean values: 0 = false, 1 = true, -1 = null
switch (getOperType())
{
case ITM_AND:
if (*(Lng32 *)op_data[1] == 0)
{
*(Lng32 *)op_data[0] = 0;
setNextClause(branch_clause);
}
else
{
*(Lng32 *)op_data[0] = *(Lng32 *)op_data[1];
setNextClause(saved_next_clause);
}
break;
case ITM_OR:
if (*(Lng32 *)op_data[1] == 1)
{
*(Lng32 *)op_data[0] = 1;
setNextClause(branch_clause);
}
else
{
*(Lng32 *)op_data[0] = *(Lng32 *)op_data[1];
setNextClause(saved_next_clause);
}
break;
case ITM_RETURN_TRUE:
setNextClause(branch_clause);
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
retcode = ex_expr::EXPR_ERROR;
break;
}
return retcode;
}
/////////////////////////////////////////////////////////////
// class bool_result_clause
/////////////////////////////////////////////////////////////
ex_expr::exp_return_type bool_result_clause::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// boolean values: 0 = false, 1 = true, -1 = null
if ((*(Lng32 *)op_data[0] == 0) || (*(Lng32 *)op_data[0] == -1))
return ex_expr::EXPR_FALSE;
else
return ex_expr::EXPR_TRUE;
}
| 1 | 22,420 | I don't believe this is correct. Consider the query, "select a from t1x where not(b = 0 and c = 0)". When B and C are both null, both equal predicates evaluate to null, and the AND evaluates to null. The NOT then also evaluates to null. The WHERE clause should treat the result of the NOT as false. But with this fix, the result of the AND will be false, making the NOT true. There needs to be three cases here for ITM_AND: If the first operand is false, then the AND is false. If the first operand is true, then the result is the second operand. If the first operand is null, then if the second operand is false, the result is false otherwise the result is null. Similar logic needs to be added to the ITM_OR case. | apache-trafodion | cpp |
@@ -72,7 +72,12 @@ final class IntArrayDocIdSet extends DocIdSet {
@Override
public int advance(int target) throws IOException {
- i = Arrays.binarySearch(docs, i + 1, length, target);
+ int bound = 1;
+ int offset = Math.max(0, i);
+ while(offset + bound < length && docs[offset + bound] < target) {
+ bound *= 2;
+ }
+ i = Arrays.binarySearch(docs, offset + bound / 2, Math.min(offset + bound, length), target);
if (i < 0) {
i = -1 - i;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
final class IntArrayDocIdSet extends DocIdSet {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayDocIdSet.class);
private final int[] docs;
private final int length;
IntArrayDocIdSet(int[] docs, int length) {
if (docs[length] != DocIdSetIterator.NO_MORE_DOCS) {
throw new IllegalArgumentException();
}
this.docs = docs;
this.length = length;
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(docs);
}
@Override
public DocIdSetIterator iterator() throws IOException {
return new IntArrayDocIdSetIterator(docs, length);
}
static class IntArrayDocIdSetIterator extends DocIdSetIterator {
private final int[] docs;
private final int length;
private int i = -1;
private int doc = -1;
IntArrayDocIdSetIterator(int[] docs, int length) {
this.docs = docs;
this.length = length;
}
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
return doc = docs[++i];
}
@Override
public int advance(int target) throws IOException {
i = Arrays.binarySearch(docs, i + 1, length, target);
if (i < 0) {
i = -1 - i;
}
return doc = docs[i];
}
@Override
public long cost() {
return length;
}
}
}
| 1 | 29,138 | `bound/2` is generally the previous bound that we tested, except when `bound` is equal to 1. It won't break in that case since callers are not supposed to call advance on a target that is lte the current doc ID, but this might still make room for bugs? | apache-lucene-solr | java |
@@ -65,8 +65,12 @@ function Admin(db, topology, promiseLibrary) {
* @method
* @param {object} command The command hash
* @param {object} [options] Optional settings.
- * @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
- * @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
+ * @param {boolean} [options.raw=false] Return a raw nodejs Buffer instead of deserializing BSON
+ * @param {boolean} [options.fullResult=false] Return the full envelope instead of just the result document
+ * @param {boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized
+ * @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
+ * @param {ReadPreference|string} [options.readPreference] Specify read preference if command supports it
+ * @param {ClientSession} [options.session] Session to use for the operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/ | 1 | 'use strict';
const executeOperation = require('./utils').executeOperation;
const applyWriteConcern = require('./utils').applyWriteConcern;
const addUser = require('./operations/db_ops').addUser;
const executeDbAdminCommand = require('./operations/db_ops').executeDbAdminCommand;
const removeUser = require('./operations/db_ops').removeUser;
const replSetGetStatus = require('./operations/admin_ops').replSetGetStatus;
const serverStatus = require('./operations/admin_ops').serverStatus;
const validateCollection = require('./operations/admin_ops').validateCollection;
/**
* @fileOverview The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **ADMIN Cannot directly be instantiated**
* @example
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
*
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
* // Use the admin database for the operation
* const adminDb = client.db(dbName).admin();
*
* // List all the available databases
* adminDb.listDatabases(function(err, dbs) {
* test.equal(null, err);
* test.ok(dbs.databases.length > 0);
* client.close();
* });
* });
*/
/**
* Create a new Admin instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @return {Admin} a collection instance.
*/
function Admin(db, topology, promiseLibrary) {
if (!(this instanceof Admin)) return new Admin(db, topology);
// Internal state
this.s = {
db: db,
topology: topology,
promiseLibrary: promiseLibrary
};
}
/**
* The callback format for results
* @callback Admin~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object} result The result object if the command was executed successfully.
*/
/**
* Execute a command
* @method
* @param {object} command The command hash
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.command = function(command, options, callback) {
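  // `options` and `callback` are both optional: pop a trailing function as the callback, then treat any remaining argument as options.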
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : {};
return executeOperation(this.s.db.s.topology, executeDbAdminCommand.bind(this.s.db), [
this.s.db,
command,
options,
callback
]);
};
/**
* Retrieve the server information for the current
* instance of the db client
*
* @param {Object} [options] optional parameters for this operation
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.buildInfo = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const cmd = { buildinfo: 1 };
return executeOperation(this.s.db.s.topology, executeDbAdminCommand.bind(this.s.db), [
this.s.db,
cmd,
options,
callback
]);
};
/**
* Retrieve the server information for the current
* instance of the db client
*
* @param {Object} [options] optional parameters for this operation
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.serverInfo = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const cmd = { buildinfo: 1 };
return executeOperation(this.s.db.s.topology, executeDbAdminCommand.bind(this.s.db), [
this.s.db,
cmd,
options,
callback
]);
};
/**
* Retrieve this db's server status.
*
* @param {Object} [options] optional parameters for this operation
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.serverStatus = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.db.s.topology, serverStatus, [this, options, callback]);
};
/**
* Ping the MongoDB server and retrieve results
*
* @param {Object} [options] optional parameters for this operation
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.ping = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const cmd = { ping: 1 };
return executeOperation(this.s.db.s.topology, executeDbAdminCommand.bind(this.s.db), [
this.s.db,
cmd,
options,
callback
]);
};
/**
* Add a user to the database.
* @method
* @param {string} username The username.
* @param {string} password The password.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j] Specify a journal write concern.
* @param {boolean} [options.fsync] Specify a file sync write concern.
* @param {object} [options.customData] Custom data associated with the user (only Mongodb 2.6 or higher)
* @param {object[]} [options.roles] Roles associated with the created user (only Mongodb 2.6 or higher)
* @param {Array} [options.authenticationRestrictions] The authentication restrictions the server enforces on the created user. Specifies a list of IP addresses and CIDR ranges from which the user is allowed to connect to the server or from which the server can accept users. New in MongoDB 3.6.
* @param {Array} [options.mechanisms] Specify the specific SCRAM mechanism or mechanisms for creating SCRAM user credentials. New in MongoDB 4.0
* @param {string} [options.passwordDigestor] Indicates whether the server or the client digests the password. The default for MongoDB 4.0 is 'server'. The default before 4.0 is 'client'.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.addUser = function(username, password, options, callback) {
const args = Array.prototype.slice.call(arguments, 2);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : {};
options = Object.assign({}, options);
// Get the options
options = applyWriteConcern(options, { db: this.s.db });
// Set the db name to admin
options.dbName = 'admin';
return executeOperation(this.s.db.s.topology, addUser, [
this.s.db,
username,
password,
options,
callback
]);
};
/**
* Remove a user from a database
* @method
* @param {string} username The username.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j] Specify a journal write concern.
* @param {boolean} [options.fsync] Specify a file sync write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.removeUser = function(username, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : {};
options = Object.assign({}, options);
// Get the options
options = applyWriteConcern(options, { db: this.s.db });
// Set the db name
options.dbName = 'admin';
return executeOperation(this.s.db.s.topology, removeUser, [
this.s.db,
username,
options,
callback
]);
};
/**
* Validate an existing collection
*
* @param {string} collectionName The name of the collection to validate.
* @param {object} [options] Optional settings.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback.
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.validateCollection = function(collectionName, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.db.s.topology, validateCollection, [
this,
collectionName,
options,
callback
]);
};
/**
* List the available databases
*
* @param {object} [options] Optional settings.
* @param {object} [options.filter] A query predicate that determines which databases are listed.
* @param {boolean} [options.nameOnly] Whether the command should return only db names, or names and size info.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback.
* @return {Promise} returns Promise if no callback passed
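* @example
* // Minimal usage sketch (assumes `admin` was obtained via `db.admin()`):
* admin.listDatabases({ nameOnly: true }).then(result => console.log(result.databases));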
*/
Admin.prototype.listDatabases = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const cmd = { listDatabases: 1 };
if (options.nameOnly) cmd.nameOnly = Number(options.nameOnly);
return executeOperation(this.s.db.s.topology, executeDbAdminCommand.bind(this.s.db), [
this.s.db,
cmd,
options,
callback
]);
};
/**
* Get ReplicaSet status
*
* @param {Object} [options] optional parameters for this operation
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Admin~resultCallback} [callback] The command result callback.
* @return {Promise} returns Promise if no callback passed
*/
Admin.prototype.replSetGetStatus = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.db.s.topology, replSetGetStatus, [this, options, callback]);
};
module.exports = Admin;
| 1 | 14,935 | Can we remove `raw`, `fullResult`, and `serializeFunctions`? | mongodb-node-mongodb-native | js |
@@ -110,6 +110,8 @@ type (
TLS RootTLS `yaml:"tls"`
// Metrics is the metrics subsystem configuration
Metrics *Metrics `yaml:"metrics"`
+ // Settings for authentication and authorization
+ Security Security `yaml:"security"`
}
// RootTLS contains all TLS settings for the Temporal server | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package config
import (
"encoding/json"
"time"
"github.com/uber-go/tally/m3"
"github.com/uber-go/tally/prometheus"
"go.temporal.io/server/common/auth"
"go.temporal.io/server/common/elasticsearch"
"go.temporal.io/server/common/messaging"
"go.temporal.io/server/common/service/dynamicconfig"
)
const (
// ReplicationConsumerTypeKafka means consuming replication tasks from kafka.
ReplicationConsumerTypeKafka = "kafka"
// ReplicationConsumerTypeRPC means pulling source DC for replication tasks.
ReplicationConsumerTypeRPC = "rpc"
)
type (
// Config contains the configuration for a set of temporal services
Config struct {
// Global is process-wide service-related configuration
Global Global `yaml:"global"`
// Persistence contains the configuration for temporal datastores
Persistence Persistence `yaml:"persistence"`
// Log is the logging config
Log Logger `yaml:"log"`
// ClusterMetadata is the config containing all valid clusters and active cluster
ClusterMetadata *ClusterMetadata `yaml:"clusterMetadata"`
// DCRedirectionPolicy contains the frontend datacenter redirection policy
DCRedirectionPolicy DCRedirectionPolicy `yaml:"dcRedirectionPolicy"`
// Services is a map of service name to service config items
Services map[string]Service `yaml:"services"`
// Kafka is the config for connecting to kafka
Kafka messaging.KafkaConfig `yaml:"kafka"`
// Archival is the config for archival
Archival Archival `yaml:"archival"`
// PublicClient is config for connecting to temporal frontend
PublicClient PublicClient `yaml:"publicClient"`
// DynamicConfigClient is the config for setting up the file based dynamic config client
// Filepath should be relative to the root directory
DynamicConfigClient dynamicconfig.FileBasedClientConfig `yaml:"dynamicConfigClient"`
// NamespaceDefaults is the default config for every namespace
NamespaceDefaults NamespaceDefaults `yaml:"namespaceDefaults"`
}
// Service contains the service specific config items
Service struct {
// RPC is the rpc configuration
RPC RPC `yaml:"rpc"`
// Deprecated. Use Metrics in global section instead.
Metrics Metrics `yaml:"metrics"`
}
// PProf contains the config items for the pprof utility
PProf struct {
// Port is the port on which the PProf will bind to
Port int `yaml:"port"`
}
// RPC contains the rpc config items
RPC struct {
// GRPCPort is the port on which gRPC will listen
GRPCPort int `yaml:"grpcPort"`
// Port used for membership listener
MembershipPort int `yaml:"membershipPort"`
// BindOnLocalHost is true if localhost is the bind address
BindOnLocalHost bool `yaml:"bindOnLocalHost"`
// BindOnIP can be used to bind service on specific ip (eg. `0.0.0.0`) -
// check net.ParseIP for supported syntax, only IPv4 is supported,
// mutually exclusive with `BindOnLocalHost` option
BindOnIP string `yaml:"bindOnIP"`
}
// Global contains config items that apply process-wide to all services
Global struct {
// Membership is the ringpop related configuration
Membership Membership `yaml:"membership"`
// PProf is the PProf configuration
PProf PProf `yaml:"pprof"`
// TLS controls the communication encryption configuration
TLS RootTLS `yaml:"tls"`
// Metrics is the metrics subsystem configuration
Metrics *Metrics `yaml:"metrics"`
}
// RootTLS contains all TLS settings for the Temporal server
RootTLS struct {
// Internode controls backend service communication TLS settings.
Internode GroupTLS `yaml:"internode"`
// Frontend controls SDK Client to Frontend communication TLS settings.
Frontend GroupTLS `yaml:"frontend"`
}
// GroupTLS contains an instance client and server TLS settings
GroupTLS struct {
// Client handles client TLS settings
Client ClientTLS `yaml:"client"`
// Server handles the server (listener) TLS settings
Server ServerTLS `yaml:"server"`
// PerHostOverrides contains per-hostname TLS settings that
// are used for external clients connecting to the Temporal Cluster on that
// specific hostname. Host names are case insensitive. Optional. If not present,
// uses configuration supplied by Server field.
PerHostOverrides map[string]ServerTLS `yaml:"hostOverrides"`
}
// ServerTLS contains items to load server TLS configuration
ServerTLS struct {
// The path to the file containing the PEM-encoded public key of the certificate to use.
CertFile string `yaml:"certFile"`
// The path to the file containing the PEM-encoded private key of the certificate to use.
KeyFile string `yaml:"keyFile"`
// A list of paths to files containing the PEM-encoded public key of the Certificate Authorities you wish to trust for client authentication.
// This value is ignored if `requireClientAuth` is not enabled. Cannot specify both ClientCAFiles and ClientCAData
ClientCAFiles []string `yaml:"clientCaFiles"`
// Base64 equivalents of the above artifacts.
// You cannot specify both a Data and a File for the same artifact (e.g. setting CertFile and CertData)
CertData string `yaml:"certData"`
KeyData string `yaml:"keyData"`
ClientCAData []string `yaml:"clientCaData"`
// Requires clients to authenticate with a certificate when connecting, otherwise known as mutual TLS.
RequireClientAuth bool `yaml:"requireClientAuth"`
}
// ClientTLS contains TLS configuration for clients within the Temporal Cluster to connect to Temporal nodes.
ClientTLS struct {
// DNS name to validate against for server to server connections.
// Required when TLS is enabled in a multi-host cluster.
// This name should be referenced by the certificate specified in the ServerTLS section.
ServerName string `yaml:"serverName"`
// DisableHostVerification, when true, disables verification of the temporal server hostname and server cert
// This option is basically equivalent to InsecureSkipVerify
// See InsecureSkipVerify in http://golang.org/pkg/crypto/tls/ for more info
DisableHostVerification bool `yaml:"disableHostVerification"`
// Optional - A list of paths to files containing the PEM-encoded public key of the Certificate Authorities that are used to validate the server's TLS certificate
// You cannot specify both RootCAFiles and RootCAData
RootCAFiles []string `yaml:"rootCaFiles"`
// Optional - A list of base64 PEM-encoded public keys of the Certificate Authorities that are used to validate the server's TLS certificate.
// You cannot specify both RootCAFiles and RootCAData
RootCAData []string `yaml:"rootCaData"`
}
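// A minimal YAML sketch of the TLS settings defined above (file paths and the
// server name are illustrative placeholders, not shipped defaults):
//
//   global:
//     tls:
//       internode:
//         server:
//           certFile: /path/to/internode.pem
//           keyFile: /path/to/internode.key
//           requireClientAuth: true
//         client:
//           serverName: internode.cluster.local
//           rootCaFiles:
//             - /path/to/ca.pem
//       frontend:
//         server:
//           certFile: /path/to/frontend.pem
//           keyFile: /path/to/frontend.key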
// Membership contains config items related to the membership layer of temporal
Membership struct {
// MaxJoinDuration is the max wait time to join the gossip ring
MaxJoinDuration time.Duration `yaml:"maxJoinDuration"`
// BroadcastAddress is used as the address that is communicated to remote nodes to connect on.
// This is generally used when BindOnIP would be the same across several nodes (ie: 0.0.0.0)
// and for nat traversal scenarios. Check net.ParseIP for supported syntax, only IPv4 is supported.
BroadcastAddress string `yaml:"broadcastAddress"`
}
// Persistence contains the configuration for data store / persistence layer
Persistence struct {
// DefaultStore is the name of the default data store to use
DefaultStore string `yaml:"defaultStore" validate:"nonzero"`
// VisibilityStore is the name of the datastore to be used for visibility records
VisibilityStore string `yaml:"visibilityStore" validate:"nonzero"`
// AdvancedVisibilityStore is the name of the datastore to be used for advanced visibility records
AdvancedVisibilityStore string `yaml:"advancedVisibilityStore"`
// HistoryMaxConns is the desired number of conns to history store. Value specified
// here overrides the MaxConns config specified as part of datastore
HistoryMaxConns int `yaml:"historyMaxConns"`
// NumHistoryShards is the desired number of history shards. This config doesn't
// belong here, needs refactoring
NumHistoryShards int32 `yaml:"numHistoryShards" validate:"nonzero"`
// DataStores contains the configuration for all datastores
DataStores map[string]DataStore `yaml:"datastores"`
// VisibilityConfig is config for visibility sampling
VisibilityConfig *VisibilityConfig `yaml:"-" json:"-"`
// TransactionSizeLimit is the largest allowed transaction size
TransactionSizeLimit dynamicconfig.IntPropertyFn `yaml:"-" json:"-"`
}
// DataStore is the configuration for a single datastore
DataStore struct {
// Cassandra contains the config for a cassandra datastore
Cassandra *Cassandra `yaml:"cassandra"`
// SQL contains the config for a SQL based datastore
SQL *SQL `yaml:"sql"`
// Custom contains the config for custom datastore implementation
CustomDataStoreConfig *CustomDatastoreConfig `yaml:"customDatastore"`
// ElasticSearch contains the config for an ElasticSearch datastore
ElasticSearch *elasticsearch.Config `yaml:"elasticsearch"`
}
// VisibilityConfig is config for visibility sampling
VisibilityConfig struct {
// EnableSampling for visibility
EnableSampling dynamicconfig.BoolPropertyFn `yaml:"-" json:"-"`
// VisibilityOpenMaxQPS max QPS for record open workflows
VisibilityOpenMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter `yaml:"-" json:"-"`
// VisibilityClosedMaxQPS max QPS for record closed workflows
VisibilityClosedMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter `yaml:"-" json:"-"`
// VisibilityListMaxQPS max QPS for list workflow
VisibilityListMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter `yaml:"-" json:"-"`
// ESIndexMaxResultWindow ElasticSearch index setting max_result_window
ESIndexMaxResultWindow dynamicconfig.IntPropertyFn `yaml:"-" json:"-"`
// MaxQPS is overall max QPS
MaxQPS dynamicconfig.IntPropertyFn `yaml:"-" json:"-"`
// ValidSearchAttributes is the set of legal indexed keys that can be used in list APIs
ValidSearchAttributes dynamicconfig.MapPropertyFn `yaml:"-" json:"-"`
}
// Cassandra contains configuration to connect to Cassandra cluster
Cassandra struct {
// Hosts is a csv of cassandra endpoints
Hosts string `yaml:"hosts" validate:"nonzero"`
// Port is the cassandra port used for connection by gocql client
Port int `yaml:"port"`
// User is the cassandra user used for authentication by gocql client
User string `yaml:"user"`
// Password is the cassandra password used for authentication by gocql client
Password string `yaml:"password"`
// Keyspace is the cassandra keyspace
Keyspace string `yaml:"keyspace" validate:"nonzero"`
// Datacenter is the data center filter arg for cassandra
Datacenter string `yaml:"datacenter"`
// MaxConns is the max number of connections to this datastore for a single keyspace
MaxConns int `yaml:"maxConns"`
// ConnectTimeout is a timeout for initial dial to cassandra server (default: 600 milliseconds)
ConnectTimeout time.Duration `yaml:"connectTimeout"`
// TLS configuration
TLS *auth.TLS `yaml:"tls"`
// Consistency configuration (defaults to LOCAL_QUORUM / LOCAL_SERIAL for all stores if this field not set)
Consistency *CassandraStoreConsistency `yaml:"consistency"`
}
// CassandraStoreConsistency enables you to set the consistency settings for each Cassandra Persistence Store for Temporal
CassandraStoreConsistency struct {
// Default defines the consistency level for ALL stores.
// Defaults to LOCAL_QUORUM and LOCAL_SERIAL if not set
Default *CassandraConsistencySettings `yaml:"default"`
}
// CassandraConsistencySettings sets the default consistency level for regular & serial queries to Cassandra.
CassandraConsistencySettings struct {
// Consistency sets the default consistency level. Values identical to gocql Consistency values. (defaults to LOCAL_QUORUM if not set).
Consistency string `yaml:"consistency"`
// SerialConsistency sets the consistency for the serial portion of queries. Values identical to gocql SerialConsistency values. (defaults to LOCAL_SERIAL if not set)
SerialConsistency string `yaml:"serialConsistency"`
}
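// A minimal YAML sketch of the consistency settings above (the values shown
// simply restate the documented defaults and are illustrative):
//
//   cassandra:
//     consistency:
//       default:
//         consistency: LOCAL_QUORUM
//         serialConsistency: LOCAL_SERIAL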
// SQL is the configuration for connecting to a SQL backed datastore
SQL struct {
// User is the username to be used for the conn
User string `yaml:"user"`
// Password is the password corresponding to the user name
Password string `yaml:"password"`
// PluginName is the name of SQL plugin
PluginName string `yaml:"pluginName" validate:"nonzero"`
// DatabaseName is the name of SQL database to connect to
DatabaseName string `yaml:"databaseName" validate:"nonzero"`
// ConnectAddr is the remote addr of the database
ConnectAddr string `yaml:"connectAddr" validate:"nonzero"`
// ConnectProtocol is the protocol that goes with the ConnectAddr ex - tcp, unix
ConnectProtocol string `yaml:"connectProtocol" validate:"nonzero"`
// ConnectAttributes is a set of key-value attributes to be sent as part of connect data_source_name url
ConnectAttributes map[string]string `yaml:"connectAttributes"`
// MaxConns the max number of connections to this datastore
MaxConns int `yaml:"maxConns"`
// MaxIdleConns is the max number of idle connections to this datastore
MaxIdleConns int `yaml:"maxIdleConns"`
// MaxConnLifetime is the maximum time a connection can be alive
MaxConnLifetime time.Duration `yaml:"maxConnLifetime"`
// EXPERIMENTAL - TaskScanPartitions is the number of partitions to sequentially scan during ListTaskQueue operations.
// This is used in a sharded sql database such as Vitess for heavy task workloads to minimize scatter gather.
// The default value for this param is 1, and should not be configured without a thorough understanding of what this does.
TaskScanPartitions int `yaml:"taskScanPartitions"`
// TLS is the configuration for TLS connections
TLS *auth.TLS `yaml:"tls"`
}
// CustomDatastoreConfig is the configuration for connecting to a custom datastore that is not supported by temporal core
CustomDatastoreConfig struct {
// Name of the custom datastore
Name string `yaml:"name"`
// Options is a set of key-value attributes that can be used by AbstractDatastoreFactory implementation
Options map[string]string `yaml:"options"`
}
// Replicator describes the configuration of replicator
Replicator struct{}
// Logger contains the config items for logger
Logger struct {
// Stdout is true if the output needs to goto standard out
Stdout bool `yaml:"stdout"`
// Level is the desired log level
Level string `yaml:"level"`
// OutputFile is the path to the log output file
OutputFile string `yaml:"outputFile"`
}
// ClusterMetadata contains the configuration for all clusters which participate in cross DC
ClusterMetadata struct {
EnableGlobalNamespace bool `yaml:"enableGlobalNamespace"`
// ReplicationConsumerConfig determines how we consume replication tasks.
ReplicationConsumer *ReplicationConsumerConfig `yaml:"replicationConsumer"`
// FailoverVersionIncrement is the increment of each cluster version when failover happens
FailoverVersionIncrement int64 `yaml:"failoverVersionIncrement"`
// MasterClusterName is the master cluster name, only the master cluster can register / update namespace
// all clusters can do namespace failover
MasterClusterName string `yaml:"masterClusterName"`
// CurrentClusterName is the name of the current cluster
CurrentClusterName string `yaml:"currentClusterName"`
// ClusterInformation contains all cluster names to corresponding information about that cluster
ClusterInformation map[string]ClusterInformation `yaml:"clusterInformation"`
}
// ClusterInformation contains the information about each cluster which participates in cross DC
ClusterInformation struct {
Enabled bool `yaml:"enabled"`
InitialFailoverVersion int64 `yaml:"initialFailoverVersion"`
// TODO: remove RPCName (not used with gRPC)
// RPCName indicates the remote service name
RPCName string `yaml:"rpcName"`
// RPCAddress indicates the remote service address (Host:Port). Host can be a DNS name.
RPCAddress string `yaml:"rpcAddress"`
}
// ReplicationConsumerConfig contains config for replication consumer
ReplicationConsumerConfig struct {
// Type determines how we consume replication tasks. It can be either kafka(default) or rpc.
Type string `yaml:"type"`
}
// ReplicationTaskProcessorConfig is the config for replication task processor.
ReplicationTaskProcessorConfig struct {
NoTaskInitialWaitIntervalSecs int `yaml:"noTaskInitialWaitIntervalSecs"`
NoTaskWaitBackoffCoefficient float64 `yaml:"noTaskWaitBackoffCoefficient"`
NoTaskMaxWaitIntervalSecs int `yaml:"noTaskMaxWaitIntervalSecs"`
}
// DCRedirectionPolicy contains the frontend datacenter redirection policy
DCRedirectionPolicy struct {
Policy string `yaml:"policy"`
ToDC string `yaml:"toDC"`
}
// Metrics contains the config items for metrics subsystem
Metrics struct {
// M3 is the configuration for m3 metrics reporter
M3 *m3.Configuration `yaml:"m3"`
// Statsd is the configuration for statsd reporter
Statsd *Statsd `yaml:"statsd"`
// Prometheus is the configuration for prometheus reporter
Prometheus *prometheus.Configuration `yaml:"prometheus"`
// Tags is the set of key-value pairs to be reported as part of every metric
Tags map[string]string `yaml:"tags"`
// Prefix sets the prefix to all outgoing metrics
Prefix string `yaml:"prefix"`
}
// Statsd contains the config items for statsd metrics reporter
Statsd struct {
// The host and port of the statsd server
HostPort string `yaml:"hostPort" validate:"nonzero"`
// The prefix to use in reporting to statsd
Prefix string `yaml:"prefix" validate:"nonzero"`
// FlushInterval is the maximum interval for sending packets.
// If it is not specified, it defaults to 1 second.
FlushInterval time.Duration `yaml:"flushInterval"`
// FlushBytes specifies the maximum udp packet size you wish to send.
// If FlushBytes is unspecified, it defaults to 1432 bytes, which is
// considered safe for local traffic.
FlushBytes int `yaml:"flushBytes"`
}
// Archival contains the config for archival
Archival struct {
// History is the config for the history archival
History HistoryArchival `yaml:"history"`
// Visibility is the config for visibility archival
Visibility VisibilityArchival `yaml:"visibility"`
}
// HistoryArchival contains the config for history archival
HistoryArchival struct {
// State is the state of history archival: enabled, disabled, or paused
State string `yaml:"state"`
// EnableRead whether history can be read from archival
EnableRead bool `yaml:"enableRead"`
// Provider contains the config for all history archivers
Provider *HistoryArchiverProvider `yaml:"provider"`
}
// HistoryArchiverProvider contains the config for all history archivers
HistoryArchiverProvider struct {
Filestore *FilestoreArchiver `yaml:"filestore"`
Gstorage *GstorageArchiver `yaml:"gstorage"`
S3store *S3Archiver `yaml:"s3store"`
}
// VisibilityArchival contains the config for visibility archival
VisibilityArchival struct {
// State is the state of visibility archival: enabled, disabled, or paused
State string `yaml:"state"`
// EnableRead whether visibility can be read from archival
EnableRead bool `yaml:"enableRead"`
// Provider contains the config for all visibility archivers
Provider *VisibilityArchiverProvider `yaml:"provider"`
}
// VisibilityArchiverProvider contains the config for all visibility archivers
VisibilityArchiverProvider struct {
Filestore *FilestoreArchiver `yaml:"filestore"`
S3store *S3Archiver `yaml:"s3store"`
Gstorage *GstorageArchiver `yaml:"gstorage"`
}
// FilestoreArchiver contains the config for the filestore archiver
FilestoreArchiver struct {
FileMode string `yaml:"fileMode"`
DirMode string `yaml:"dirMode"`
}
// GstorageArchiver contains the config for the google storage archiver
GstorageArchiver struct {
CredentialsPath string `yaml:"credentialsPath"`
}
// S3Archiver contains the config for S3 archiver
S3Archiver struct {
Region string `yaml:"region"`
Endpoint *string `yaml:"endpoint"`
S3ForcePathStyle bool `yaml:"s3ForcePathStyle"`
}
// PublicClient is config for connecting to temporal frontend
PublicClient struct {
// HostPort is the host port to connect on. Host can be DNS name
HostPort string `yaml:"hostPort" validate:"nonzero"`
// RefreshInterval is the interval to refresh DNS. Defaults to 10s
RefreshInterval time.Duration `yaml:"RefreshInterval"`
}
// NamespaceDefaults is the default config for each namespace
NamespaceDefaults struct {
// Archival is the default archival config for each namespace
Archival ArchivalNamespaceDefaults `yaml:"archival"`
}
// ArchivalNamespaceDefaults is the default archival config for each namespace
ArchivalNamespaceDefaults struct {
// History is the namespace default history archival config for each namespace
History HistoryArchivalNamespaceDefaults `yaml:"history"`
// Visibility is the namespace default visibility archival config for each namespace
Visibility VisibilityArchivalNamespaceDefaults `yaml:"visibility"`
}
// HistoryArchivalNamespaceDefaults is the default history archival config for each namespace
HistoryArchivalNamespaceDefaults struct {
// State is the namespace default state of history archival: enabled or disabled
State string `yaml:"state"`
// URI is the namespace default URI for history archiver
URI string `yaml:"URI"`
}
// VisibilityArchivalNamespaceDefaults is the default visibility archival config for each namespace
VisibilityArchivalNamespaceDefaults struct {
// State is the namespace default state of visibility archival: enabled or disabled
State string `yaml:"state"`
// URI is the namespace default URI for visibility archiver
URI string `yaml:"URI"`
}
)
// Validate validates this config
func (c *Config) Validate() error {
if err := c.Persistence.Validate(); err != nil {
return err
}
if err := c.Archival.Validate(&c.NamespaceDefaults.Archival); err != nil {
return err
}
return nil
}
// String converts the config object into a string
func (c *Config) String() string {
out, _ := json.MarshalIndent(c, "", " ")
return string(out)
}
func (r *GroupTLS) IsEnabled() bool {
return r.Server.KeyFile != "" || r.Server.KeyData != ""
}
| 1 | 10,791 | [Nit] I don't have a proposed suggestion, but security seems too generic of a name here. | temporalio-temporal | go |
@@ -0,0 +1,19 @@
+const config = require('../lib/config')
+const util = require('../lib/util')
+const path = require('path')
+const fs = require('fs-extra')
+
+const createDist = (options) => {
+ config.update(options)
+ config.buildConfig = 'Release'
+
+ let cmdOptions = config.defaultOptions
+ const args = util.buildArgsToString(config.buildArgs())
+
+ fs.removeSync(path.join(config.outputDir, 'dist'))
+ util.run('gn', ['gen', config.outputDir, '--args="' + args + '"'], cmdOptions)
+ util.run('ninja', ['-C', config.outputDir, 'create_dist'], cmdOptions)
+}
+
+module.exports = createDist
+ | 1 | 1 | 5,253 | do we need to force a buildConfig here? I know it's in muon, but that might actually be making things harder for people | brave-brave-browser | js |
|
@@ -302,6 +302,10 @@ bool Client::Process() {
}
if (AutoFireEnabled()) {
+ if (this->GetTarget() == this) {
+ this->MessageString(Chat::TooFarAway, TRY_ATTACKING_SOMEONE);
+ auto_fire = false;
+ }
EQ::ItemInstance *ranged = GetInv().GetItem(EQ::invslot::slotRange);
if (ranged)
{ | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2003 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
client_process.cpp:
Handles client login sequence and packets sent from client to zone
*/
#include "../common/eqemu_logsys.h"
#include "../common/global_define.h"
#include <iostream>
#include <stdio.h>
#include <zlib.h>
#ifdef _WINDOWS
#include <winsock2.h>
#include <windows.h>
#define snprintf _snprintf
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#else
#include <pthread.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#endif
#include "../common/data_verification.h"
#include "../common/rulesys.h"
#include "../common/skills.h"
#include "../common/spdat.h"
#include "../common/string_util.h"
#include "event_codes.h"
#include "expedition.h"
#include "guild_mgr.h"
#include "map.h"
#include "petitions.h"
#include "queryserv.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "worldserver.h"
#include "zone.h"
#include "zonedb.h"
#include "zone_store.h"
extern QueryServ* QServ;
extern Zone* zone;
extern volatile bool is_zone_loaded;
extern WorldServer worldserver;
extern PetitionList petition_list;
extern EntityList entity_list;
bool Client::Process() {
bool ret = true;
if (Connected() || IsLD()) {
// try to send all packets that weren't sent before
if (!IsLD() && zoneinpacket_timer.Check()) {
SendAllPackets();
}
if (adventure_request_timer) {
if (adventure_request_timer->Check()) {
safe_delete(adventure_request_timer);
}
}
if (adventure_create_timer) {
if (adventure_create_timer->Check()) {
safe_delete(adventure_create_timer);
}
}
if (adventure_leave_timer) {
if (adventure_leave_timer->Check()) {
safe_delete(adventure_leave_timer);
}
}
if (adventure_door_timer) {
if (adventure_door_timer->Check()) {
safe_delete(adventure_door_timer);
}
}
if (adventure_stats_timer) {
if (adventure_stats_timer->Check()) {
safe_delete(adventure_stats_timer);
}
}
if (adventure_leaderboard_timer) {
if (adventure_leaderboard_timer->Check()) {
safe_delete(adventure_leaderboard_timer);
}
}
if (dead) {
SetHP(-100);
if (RespawnFromHoverTimer.Check())
HandleRespawnFromHover(0);
}
if (IsTracking() && (ClientVersion() >= EQ::versions::ClientVersion::SoD) && TrackingTimer.Check())
DoTracking();
// SendHPUpdate calls hpupdate_timer.Start so it can delay this timer, so lets not reset with the check
// since the function will anyways
if (hpupdate_timer.Check(false)) {
SendHPUpdate();
}
/* I haven't naturally updated my position in 10 seconds, updating manually */
if (!is_client_moving && position_update_timer.Check()) {
SentPositionPacket(0.0f, 0.0f, 0.0f, 0.0f, 0);
}
if (mana_timer.Check())
CheckManaEndUpdate();
if (dead && dead_timer.Check()) {
database.MoveCharacterToZone(GetName(), m_pp.binds[0].zone_id);
m_pp.zone_id = m_pp.binds[0].zone_id;
m_pp.zoneInstance = m_pp.binds[0].instance_id;
m_pp.x = m_pp.binds[0].x;
m_pp.y = m_pp.binds[0].y;
m_pp.z = m_pp.binds[0].z;
Save();
Group *mygroup = GetGroup();
if (mygroup)
{
entity_list.MessageGroup(this, true, 15, "%s died.", GetName());
mygroup->MemberZoned(this);
}
Raid *myraid = entity_list.GetRaidByClient(this);
if (myraid)
{
myraid->MemberZoned(this);
}
return(false);
}
if (charm_update_timer.Check()) {
CalcItemScale();
}
if (TaskPeriodic_Timer.Check() && task_state)
task_state->TaskPeriodicChecks(this);
if (dynamiczone_removal_timer.Check() && zone && zone->GetInstanceID() != 0)
{
dynamiczone_removal_timer.Disable();
GoToDzSafeReturnOrBind(zone->GetDynamicZone());
}
if (linkdead_timer.Check()) {
LeaveGroup();
Save();
if (GetMerc()) {
GetMerc()->Save();
GetMerc()->Depop();
}
Raid *myraid = entity_list.GetRaidByClient(this);
if (myraid) {
myraid->MemberZoned(this);
}
SetDynamicZoneMemberStatus(DynamicZoneMemberStatus::Offline);
parse->EventPlayer(EVENT_DISCONNECT, this, "", 0);
return false; //delete client
}
if (camp_timer.Check()) {
LeaveGroup();
Save();
if (GetMerc())
{
GetMerc()->Save();
GetMerc()->Depop();
}
instalog = true;
}
if (IsStunned() && stunned_timer.Check())
Mob::UnStun();
cheat_manager.ClientProcess();
if (bardsong_timer.Check() && bardsong != 0) {
//NOTE: this is kinda a heavy-handed check to make sure the mob still exists before
//doing the next pulse on them...
Mob *song_target = nullptr;
if (bardsong_target_id == GetID()) {
song_target = this;
}
else {
song_target = entity_list.GetMob(bardsong_target_id);
}
if (song_target == nullptr) {
InterruptSpell(SONG_ENDS_ABRUPTLY, 0x121, bardsong);
}
else {
if (!ApplyNextBardPulse(bardsong, song_target, bardsong_slot))
InterruptSpell(SONG_ENDS_ABRUPTLY, 0x121, bardsong);
//SpellFinished(bardsong, bardsong_target, bardsong_slot, spells[bardsong].mana);
}
}
if (GetMerc()) {
UpdateMercTimer();
}
if (GetMercInfo().MercTemplateID != 0 && GetMercInfo().IsSuspended) {
CheckMercSuspendTimer();
}
if (IsAIControlled())
AI_Process();
// Don't reset the bindwound timer so we can check it in BindWound as well.
if (bindwound_timer.Check(false) && bindwound_target != 0) {
BindWound(bindwound_target, false);
}
if (KarmaUpdateTimer) {
if (KarmaUpdateTimer->Check(false)) {
KarmaUpdateTimer->Start(RuleI(Chat, KarmaUpdateIntervalMS));
database.UpdateKarma(AccountID(), ++TotalKarma);
}
}
if (qGlobals) {
if (qglobal_purge_timer.Check()) {
qGlobals->PurgeExpiredGlobals();
}
}
if (RuleB(Character, ActiveInvSnapshots) && time(nullptr) >= GetNextInvSnapshotTime()) {
if (database.SaveCharacterInvSnapshot(CharacterID())) {
SetNextInvSnapshot(RuleI(Character, InvSnapshotMinIntervalM));
LogInventory("Successful inventory snapshot taken of [{}] - setting next interval for [{}] minute[{}]",
GetName(), RuleI(Character, InvSnapshotMinIntervalM), (RuleI(Character, InvSnapshotMinIntervalM) == 1 ? "" : "s"));
}
else {
SetNextInvSnapshot(RuleI(Character, InvSnapshotMinRetryM));
LogInventory("Failed to take inventory snapshot of [{}] - retrying in [{}] minute[{}]",
GetName(), RuleI(Character, InvSnapshotMinRetryM), (RuleI(Character, InvSnapshotMinRetryM) == 1 ? "" : "s"));
}
}
/**
* Scan close range mobs
* Used in aggro checks
*/
if (mob_close_scan_timer.Check()) {
entity_list.ScanCloseMobs(close_mobs, this, is_client_moving);
}
bool may_use_attacks = false;
/*
Things which prevent us from attacking:
- being under AI control, the AI does attacks
- being dead
- casting a spell and bard check
- not having a target
- being stunned or mezzed
- having used a ranged weapon recently
*/
if (auto_attack) {
if (!IsAIControlled() && !dead
&& !(spellend_timer.Enabled() && casting_spell_id && !IsBardSong(casting_spell_id))
&& !IsStunned() && !IsFeared() && !IsMezzed() && GetAppearance() != eaDead && !IsMeleeDisabled()
)
may_use_attacks = true;
if (may_use_attacks && ranged_timer.Enabled()) {
//if the range timer is enabled, we need to consider it
if (!ranged_timer.Check(false)) {
//the ranged timer has not elapsed, cannot attack.
may_use_attacks = false;
}
}
}
if (AutoFireEnabled()) {
EQ::ItemInstance *ranged = GetInv().GetItem(EQ::invslot::slotRange);
if (ranged)
{
if (ranged->GetItem() && ranged->GetItem()->ItemType == EQ::item::ItemTypeBow) {
if (ranged_timer.Check(false)) {
if (GetTarget() && (GetTarget()->IsNPC() || GetTarget()->IsClient())) {
if (GetTarget()->InFrontMob(this, GetTarget()->GetX(), GetTarget()->GetY())) {
if (CheckLosFN(GetTarget())) {
//client has built in los check, but auto fire does not.. done last.
RangedAttack(GetTarget());
if (CheckDoubleRangedAttack())
RangedAttack(GetTarget(), true);
}
else
ranged_timer.Start();
}
else
ranged_timer.Start();
}
else
ranged_timer.Start();
}
}
else if (ranged->GetItem() && (ranged->GetItem()->ItemType == EQ::item::ItemTypeLargeThrowing || ranged->GetItem()->ItemType == EQ::item::ItemTypeSmallThrowing)) {
if (ranged_timer.Check(false)) {
if (GetTarget() && (GetTarget()->IsNPC() || GetTarget()->IsClient())) {
if (GetTarget()->InFrontMob(this, GetTarget()->GetX(), GetTarget()->GetY())) {
if (CheckLosFN(GetTarget())) {
//client has built in los check, but auto fire does not.. done last.
ThrowingAttack(GetTarget());
}
else
ranged_timer.Start();
}
else
ranged_timer.Start();
}
else
ranged_timer.Start();
}
}
}
}
Mob *auto_attack_target = GetTarget();
if (auto_attack && auto_attack_target != nullptr && may_use_attacks && attack_timer.Check()) {
//check if change
//only check on primary attack.. sorry offhand you gotta wait!
if (aa_los_them_mob) {
if (auto_attack_target != aa_los_them_mob ||
m_AutoAttackPosition.x != GetX() ||
m_AutoAttackPosition.y != GetY() ||
m_AutoAttackPosition.z != GetZ() ||
m_AutoAttackTargetLocation.x != aa_los_them_mob->GetX() ||
m_AutoAttackTargetLocation.y != aa_los_them_mob->GetY() ||
m_AutoAttackTargetLocation.z != aa_los_them_mob->GetZ()) {
aa_los_them_mob = auto_attack_target;
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3(aa_los_them_mob->GetPosition());
los_status = CheckLosFN(auto_attack_target);
los_status_facing = IsFacingMob(aa_los_them_mob);
}
// If only our heading changes, we can skip the CheckLosFN call
// but above we still need to update los_status_facing
if (m_AutoAttackPosition.w != GetHeading()) {
m_AutoAttackPosition.w = GetHeading();
los_status_facing = IsFacingMob(aa_los_them_mob);
}
}
else {
aa_los_them_mob = auto_attack_target;
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3(aa_los_them_mob->GetPosition());
los_status = CheckLosFN(auto_attack_target);
los_status_facing = IsFacingMob(aa_los_them_mob);
}
if (!CombatRange(auto_attack_target)) {
MessageString(Chat::TooFarAway, TARGET_TOO_FAR);
}
else if (auto_attack_target == this) {
MessageString(Chat::TooFarAway, TRY_ATTACKING_SOMEONE);
}
else if (!los_status || !los_status_facing) {
//you can't see your target
}
else if (auto_attack_target->GetHP() > -10) // -10 so we can watch people bleed in PvP
{
EQ::ItemInstance *wpn = GetInv().GetItem(EQ::invslot::slotPrimary);
TryWeaponProc(wpn, auto_attack_target, EQ::invslot::slotPrimary);
TriggerDefensiveProcs(auto_attack_target, EQ::invslot::slotPrimary, false);
DoAttackRounds(auto_attack_target, EQ::invslot::slotPrimary);
if (TryDoubleMeleeRoundEffect()) {
DoAttackRounds(auto_attack_target, EQ::invslot::slotPrimary);
}
if (CheckAATimer(aaTimerRampage)) {
entity_list.AEAttack(this, 30);
}
}
}
if (GetClass() == WARRIOR || GetClass() == BERSERKER) {
if (!dead && !IsBerserk() && GetHPRatio() < RuleI(Combat, BerserkerFrenzyStart)) {
entity_list.MessageCloseString(this, false, 200, 0, BERSERK_START, GetName());
berserk = true;
}
if (IsBerserk() && GetHPRatio() > RuleI(Combat, BerserkerFrenzyEnd)) {
entity_list.MessageCloseString(this, false, 200, 0, BERSERK_END, GetName());
berserk = false;
}
}
if (auto_attack && may_use_attacks && auto_attack_target != nullptr
&& CanThisClassDualWield() && attack_dw_timer.Check())
{
// Range check
if (!CombatRange(auto_attack_target)) {
// this is a duplicate message don't use it.
//MessageString(Chat::TooFarAway,TARGET_TOO_FAR);
}
// Don't attack yourself
else if (auto_attack_target == this) {
//MessageString(Chat::TooFarAway,TRY_ATTACKING_SOMEONE);
}
else if (!los_status || !los_status_facing)
{
//you can't see your target
}
else if (auto_attack_target->GetHP() > -10) {
CheckIncreaseSkill(EQ::skills::SkillDualWield, auto_attack_target, -10);
if (CheckDualWield()) {
EQ::ItemInstance *wpn = GetInv().GetItem(EQ::invslot::slotSecondary);
TryWeaponProc(wpn, auto_attack_target, EQ::invslot::slotSecondary);
DoAttackRounds(auto_attack_target, EQ::invslot::slotSecondary);
}
}
}
if (HasVirus()) {
if (viral_timer.Check()) {
viral_timer_counter++;
for (int i = 0; i < MAX_SPELL_TRIGGER * 2; i += 2) {
if (viral_spells[i]) {
if (viral_timer_counter % spells[viral_spells[i]].viral_timer == 0) {
SpreadVirus(viral_spells[i], viral_spells[i + 1]);
}
}
}
}
if (viral_timer_counter > 999)
viral_timer_counter = 0;
}
ProjectileAttack();
if (spellbonuses.GravityEffect == 1) {
if (gravity_timer.Check())
DoGravityEffect();
}
if (shield_timer.Check()) {
ShieldAbilityFinish();
}
SpellProcess();
if (endupkeep_timer.Check() && !dead) {
DoEnduranceUpkeep();
}
// this is independent of the tick timer
if (consume_food_timer.Check())
DoStaminaHungerUpdate();
if (tic_timer.Check() && !dead) {
CalcMaxHP();
CalcMaxMana();
CalcATK();
CalcMaxEndurance();
CalcRestState();
DoHPRegen();
DoManaRegen();
DoEnduranceRegen();
BuffProcess();
if (tribute_timer.Check()) {
ToggleTribute(true); //re-activate the tribute.
}
if (fishing_timer.Check()) {
GoFish();
}
if (autosave_timer.Check()) {
Save(0);
}
if (m_pp.intoxication > 0)
{
--m_pp.intoxication;
CalcBonuses();
}
if (ItemTickTimer.Check())
{
TickItemCheck();
}
if (ItemQuestTimer.Check())
{
ItemTimerCheck();
}
}
}
if (focus_proc_limit_timer.Check() && !dead)
FocusProcLimitProcess();
if (client_state == CLIENT_KICKED) {
Save();
OnDisconnect(true);
std::cout << "Client disconnected (cs=k): " << GetName() << std::endl;
return false;
}
if (client_state == DISCONNECTED) {
OnDisconnect(true);
std::cout << "Client disconnected (cs=d): " << GetName() << std::endl;
database.SetMQDetectionFlag(this->AccountName(), GetName(), "/MQInstantCamp: Possible instant camp disconnect.", zone->GetShortName());
return false;
}
if (client_state == CLIENT_ERROR) {
OnDisconnect(true);
std::cout << "Client disconnected (cs=e): " << GetName() << std::endl;
return false;
}
if (client_state != CLIENT_LINKDEAD && !eqs->CheckState(ESTABLISHED)) {
OnDisconnect(true);
LogInfo("Client linkdead: {}", name);
if (Admin() > 100) {
if (GetMerc()) {
GetMerc()->Save();
GetMerc()->Depop();
}
return false;
}
else if (!linkdead_timer.Enabled()) {
linkdead_timer.Start(RuleI(Zone, ClientLinkdeadMS));
client_state = CLIENT_LINKDEAD;
AI_Start(CLIENT_LD_TIMEOUT);
SendAppearancePacket(AT_Linkdead, 1);
SetDynamicZoneMemberStatus(DynamicZoneMemberStatus::LinkDead);
}
}
/************ Get all packets from packet manager out queue and process them ************/
EQApplicationPacket *app = nullptr;
if (!eqs->CheckState(CLOSING))
{
while (app = eqs->PopPacket()) {
HandlePacket(app);
safe_delete(app);
}
}
//At this point, we are still connected, everything important has taken
//place, now check to see if anybody wants to aggro us.
// only if client is not feigned
if (zone->CanDoCombat() && ret && !GetFeigned() && client_scan_npc_aggro_timer.Check()) {
int npc_scan_count = 0;
for (auto & close_mob : close_mobs) {
Mob *mob = close_mob.second;
if (!mob)
continue;
if (mob->IsClient())
continue;
if (mob->CheckWillAggro(this) && !mob->CheckAggro(this)) {
mob->AddToHateList(this, 25);
}
npc_scan_count++;
}
LogAggro("Checking Reverse Aggro (client->npc) scanned_npcs ([{}])", npc_scan_count);
}
if (client_state != CLIENT_LINKDEAD && (client_state == CLIENT_ERROR || client_state == DISCONNECTED || client_state == CLIENT_KICKED || !eqs->CheckState(ESTABLISHED)))
{
//client logged out or errored out
//ResetTrade();
if (client_state != CLIENT_KICKED && !bZoning && !instalog) {
Save();
}
client_state = CLIENT_LINKDEAD;
if (bZoning || instalog || GetGM())
{
Group *mygroup = GetGroup();
if (mygroup)
{
if (!bZoning)
{
entity_list.MessageGroup(this, true, 15, "%s logged out.", GetName());
LeaveGroup();
}
else
{
entity_list.MessageGroup(this, true, 15, "%s left the zone.", GetName());
mygroup->MemberZoned(this);
if (GetMerc() && GetMerc()->HasGroup())
{
GetMerc()->RemoveMercFromGroup(GetMerc(), GetMerc()->GetGroup());
}
}
}
Raid *myraid = entity_list.GetRaidByClient(this);
if (myraid)
{
if (!bZoning)
{
//entity_list.MessageGroup(this,true,15,"%s logged out.",GetName());
myraid->MemberZoned(this);
}
else
{
//entity_list.MessageGroup(this,true,15,"%s left the zone.",GetName());
myraid->MemberZoned(this);
}
}
OnDisconnect(false);
return false;
}
else
{
LinkDead();
}
OnDisconnect(true);
}
// Feign Death 2 minutes and zone forgets you
if (forget_timer.Check()) {
forget_timer.Disable();
entity_list.ClearZoneFeignAggro(this);
Message(0, "Your enemies have forgotten you!");
}
if (client_state == CLIENT_CONNECTED) {
if (m_dirtyautohaters)
ProcessXTargetAutoHaters();
if (aggro_meter_timer.Check())
ProcessAggroMeter();
}
return ret;
}
/* Just a set of actions performed all over in Client::Process */
void Client::OnDisconnect(bool hard_disconnect) {
if(hard_disconnect)
{
LeaveGroup();
if (GetMerc())
{
GetMerc()->Save();
GetMerc()->Depop();
}
Raid *MyRaid = entity_list.GetRaidByClient(this);
if (MyRaid)
MyRaid->MemberZoned(this);
parse->EventPlayer(EVENT_DISCONNECT, this, "", 0);
/* QS: PlayerLogConnectDisconnect */
if (RuleB(QueryServ, PlayerLogConnectDisconnect)){
std::string event_desc = StringFormat("Disconnect :: in zoneid:%i instid:%i", this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Connect_State, this->CharacterID(), event_desc);
}
}
if (!bZoning)
{
SetDynamicZoneMemberStatus(DynamicZoneMemberStatus::Offline);
}
RemoveAllAuras();
Mob *Other = trade->With();
if(Other)
{
LogTrading("Client disconnected during a trade. Returning their items");
FinishTrade(this);
if(Other->IsClient())
Other->CastToClient()->FinishTrade(Other);
/* Reset both sides of the trade */
trade->Reset();
Other->trade->Reset();
}
database.SetFirstLogon(CharacterID(), 0); //We change firstlogon status regardless of whether a player logs out to zone or not, because we only want to trigger it on their first login from world.
/* Remove ourself from all proximities */
ClearAllProximities();
auto outapp = new EQApplicationPacket(OP_LogoutReply);
FastQueuePacket(&outapp);
Disconnect();
}
// Sends the client complete inventory used in character login
void Client::BulkSendInventoryItems()
{
// LINKDEAD TRADE ITEMS
// Move trade slot items back into normal inventory..need them there now for the proceeding validity checks
for (int16 slot_id = EQ::invslot::TRADE_BEGIN; slot_id <= EQ::invslot::TRADE_END; slot_id++) {
EQ::ItemInstance* inst = m_inv.PopItem(slot_id);
if(inst) {
bool is_arrow = (inst->GetItem()->ItemType == EQ::item::ItemTypeArrow) ? true : false;
int16 free_slot_id = m_inv.FindFreeSlot(inst->IsClassBag(), true, inst->GetItem()->Size, is_arrow);
LogInventory("Incomplete Trade Transaction: Moving [{}] from slot [{}] to [{}]", inst->GetItem()->Name, slot_id, free_slot_id);
PutItemInInventory(free_slot_id, *inst, false);
database.SaveInventory(character_id, nullptr, slot_id);
safe_delete(inst);
}
}
bool deletenorent = database.NoRentExpired(GetName());
if (deletenorent) { //client was offline for more than 30 minutes, delete no rent items
if (RuleB(Inventory, TransformSummonedBags))
DisenchantSummonedBags(false);
RemoveNoRent(false);
}
RemoveDuplicateLore(false);
MoveSlotNotAllowed(false);
EQ::OutBuffer ob;
EQ::OutBuffer::pos_type last_pos = ob.tellp();
// Possessions items
for (int16 slot_id = EQ::invslot::POSSESSIONS_BEGIN; slot_id <= EQ::invslot::POSSESSIONS_END; slot_id++) {
const EQ::ItemInstance* inst = m_inv[slot_id];
if (!inst)
continue;
inst->Serialize(ob, slot_id);
if (ob.tellp() == last_pos)
LogInventory("Serialization failed on item slot [{}] during BulkSendInventoryItems. Item skipped", slot_id);
last_pos = ob.tellp();
}
// Bank items
for (int16 slot_id = EQ::invslot::BANK_BEGIN; slot_id <= EQ::invslot::BANK_END; slot_id++) {
const EQ::ItemInstance* inst = m_inv[slot_id];
if (!inst)
continue;
inst->Serialize(ob, slot_id);
if (ob.tellp() == last_pos)
LogInventory("Serialization failed on item slot [{}] during BulkSendInventoryItems. Item skipped", slot_id);
last_pos = ob.tellp();
}
// SharedBank items
for (int16 slot_id = EQ::invslot::SHARED_BANK_BEGIN; slot_id <= EQ::invslot::SHARED_BANK_END; slot_id++) {
const EQ::ItemInstance* inst = m_inv[slot_id];
if (!inst)
continue;
inst->Serialize(ob, slot_id);
if (ob.tellp() == last_pos)
LogInventory("Serialization failed on item slot [{}] during BulkSendInventoryItems. Item skipped", slot_id);
last_pos = ob.tellp();
}
auto outapp = new EQApplicationPacket(OP_CharInventory);
outapp->size = ob.size();
outapp->pBuffer = ob.detach();
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::BulkSendMerchantInventory(int merchant_id, int npcid) {
const EQ::ItemData* handyitem = nullptr;
uint32 numItemSlots = 80; //The max number of items passed in the transaction.
if (m_ClientVersionBit & EQ::versions::maskRoFAndLater) { // RoF+ can send 200 items
numItemSlots = 200;
}
const EQ::ItemData *item = nullptr;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
Mob* merch = entity_list.GetMobByNpcTypeID(npcid);
if (merlist.size() == 0) { //Attempt to load the data, it might have been missed if someone spawned the merchant after the zone was loaded
zone->LoadNewMerchantData(merchant_id);
merlist = zone->merchanttable[merchant_id];
if (merlist.size() == 0)
return;
}
std::list<TempMerchantList> tmp_merlist = zone->tmpmerchanttable[npcid];
std::list<TempMerchantList>::iterator tmp_itr;
uint32 i = 1;
uint8 handychance = 0;
for (itr = merlist.begin(); itr != merlist.end() && i <= numItemSlots; ++itr) {
MerchantList ml = *itr;
if (ml.probability != 100 && zone->random.Int(1, 100) > ml.probability)
continue;
if (GetLevel() < ml.level_required)
continue;
if (!(ml.classes_required & (1 << (GetClass() - 1))))
continue;
int32 fac = merch ? merch->GetPrimaryFaction() : 0;
int32 cur_fac_level;
if (fac == 0 || sneaking) {
cur_fac_level = 0;
}
else {
cur_fac_level = GetModCharacterFactionLevel(fac);
}
if (cur_fac_level < ml.faction_required)
continue;
handychance = zone->random.Int(0, merlist.size() + tmp_merlist.size() - 1);
item = database.GetItem(ml.item);
if (item) {
if (handychance == 0)
handyitem = item;
else
handychance--;
int charges = 1;
if (item->IsClassCommon())
charges = item->MaxCharges;
EQ::ItemInstance* inst = database.CreateItem(item, charges);
if (inst) {
if (RuleB(Merchant, UsePriceMod)) {
inst->SetPrice((item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate * Client::CalcPriceMod(merch, false)));
}
else
inst->SetPrice((item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate));
inst->SetMerchantSlot(ml.slot);
inst->SetMerchantCount(-1); //unlimited
if (charges > 0)
inst->SetCharges(charges);
else
inst->SetCharges(1);
SendItemPacket(ml.slot - 1, inst, ItemPacketMerchant);
safe_delete(inst);
}
}
// Account for merchant lists with gaps.
if (ml.slot >= i) {
if (ml.slot > i)
LogDebug("(WARNING) Merchantlist contains gap at slot [{}]. Merchant: [{}], NPC: [{}]", i, merchant_id, npcid);
i = ml.slot + 1;
}
}
std::list<TempMerchantList> origtmp_merlist = zone->tmpmerchanttable[npcid];
tmp_merlist.clear();
for (tmp_itr = origtmp_merlist.begin(); tmp_itr != origtmp_merlist.end() && i <= numItemSlots; ++tmp_itr) {
TempMerchantList ml = *tmp_itr;
item = database.GetItem(ml.item);
ml.slot = i;
if (item) {
if (handychance == 0)
handyitem = item;
else
handychance--;
int charges = 1;
//if(item->ItemClass==ItemClassCommon && (int16)ml.charges <= item->MaxCharges)
// charges=ml.charges;
//else
charges = item->MaxCharges;
EQ::ItemInstance* inst = database.CreateItem(item, charges);
if (inst) {
if (RuleB(Merchant, UsePriceMod)) {
inst->SetPrice((item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate * Client::CalcPriceMod(merch, false)));
}
else
inst->SetPrice((item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate));
inst->SetMerchantSlot(ml.slot);
inst->SetMerchantCount(ml.charges);
if(charges > 0)
inst->SetCharges(item->MaxCharges);//inst->SetCharges(charges);
else
inst->SetCharges(1);
SendItemPacket(ml.slot-1, inst, ItemPacketMerchant);
safe_delete(inst);
}
}
tmp_merlist.push_back(ml);
i++;
}
//this resets the slot
zone->tmpmerchanttable[npcid] = tmp_merlist;
if (merch != nullptr && handyitem) {
char handy_id[8] = { 0 };
int greeting = zone->random.Int(0, 4);
int greet_id = 0;
switch (greeting) {
case 1:
greet_id = MERCHANT_GREETING;
break;
case 2:
greet_id = MERCHANT_HANDY_ITEM1;
break;
case 3:
greet_id = MERCHANT_HANDY_ITEM2;
break;
case 4:
greet_id = MERCHANT_HANDY_ITEM3;
break;
default:
greet_id = MERCHANT_HANDY_ITEM4;
}
sprintf(handy_id, "%i", greet_id);
if (greet_id != MERCHANT_GREETING)
MessageString(Chat::NPCQuestSay, GENERIC_STRINGID_SAY, merch->GetCleanName(), handy_id, this->GetName(), handyitem->Name);
else
MessageString(Chat::NPCQuestSay, GENERIC_STRINGID_SAY, merch->GetCleanName(), handy_id, this->GetName());
}
// safe_delete_array(cpi);
}
uint8 Client::WithCustomer(uint16 NewCustomer){
if(NewCustomer == 0) {
CustomerID = 0;
return 0;
}
if(CustomerID == 0) {
CustomerID = NewCustomer;
return 1;
}
// Check that the player browsing our wares hasn't gone away.
Client* c = entity_list.GetClientByID(CustomerID);
if(!c) {
LogTrading("Previous customer has gone away");
CustomerID = NewCustomer;
return 1;
}
return 0;
}
void Client::OPRezzAnswer(uint32 Action, uint32 SpellID, uint16 ZoneID, uint16 InstanceID, float x, float y, float z)
{
if(PendingRezzXP < 0) {
// pendingrezexp is set to -1 if we are not expecting an OP_RezzAnswer
LogSpells("Unexpected OP_RezzAnswer. Ignoring it");
Message(Chat::Red, "You have already been resurrected.\n");
return;
}
if (Action == 1)
{
// Mark the corpse as rezzed in the database, just in case the corpse has been buried, or the zone the
// corpse is in has shut down since the rez spell was cast.
database.MarkCorpseAsRezzed(PendingRezzDBID);
LogSpells("Player [{}] got a [{}] Rezz, spellid [{}] in zone[{}], instance id [{}]",
this->name, (uint16)spells[SpellID].base[0],
SpellID, ZoneID, InstanceID);
this->BuffFadeNonPersistDeath();
int SpellEffectDescNum = GetSpellEffectDescNum(SpellID);
// Rez spells with Rez effects have this DescNum (first is Titanium, second is 6.2 Client)
if((SpellEffectDescNum == 82) || (SpellEffectDescNum == 39067)) {
SetMana(0);
SetHP(GetMaxHP()/5);
int rez_eff = 756;
if (RuleB(Character, UseOldRaceRezEffects) &&
(GetRace() == BARBARIAN || GetRace() == DWARF || GetRace() == TROLL || GetRace() == OGRE))
rez_eff = 757;
SpellOnTarget(rez_eff, this); // Rezz effects
}
else {
SetMana(GetMaxMana());
SetHP(GetMaxHP());
}
if(spells[SpellID].base[0] < 100 && spells[SpellID].base[0] > 0 && PendingRezzXP > 0)
{
SetEXP(((int)(GetEXP()+((float)((PendingRezzXP / 100) * spells[SpellID].base[0])))),
GetAAXP(),true);
}
else if (spells[SpellID].base[0] == 100 && PendingRezzXP > 0) {
SetEXP((GetEXP() + PendingRezzXP), GetAAXP(), true);
}
//Was sending the packet back to initiate client zone...
//but that could be abusable, so lets go through proper channels
MovePC(ZoneID, InstanceID, x, y, z, GetHeading(), 0, ZoneSolicited);
entity_list.RefreshClientXTargets(this);
}
PendingRezzXP = -1;
PendingRezzSpellID = 0;
}
void Client::OPTGB(const EQApplicationPacket *app)
{
if(!app) return;
if(!app->pBuffer) return;
uint32 tgb_flag = *(uint32 *)app->pBuffer;
if(tgb_flag == 2)
MessageString(Chat::White, TGB() ? TGB_ON : TGB_OFF);
else
tgb = tgb_flag;
}
void Client::OPMemorizeSpell(const EQApplicationPacket* app)
{
if(app->size != sizeof(MemorizeSpell_Struct))
{
LogError("Wrong size on OP_MemorizeSpell. Got: [{}], Expected: [{}]", app->size, sizeof(MemorizeSpell_Struct));
DumpPacket(app);
return;
}
const MemorizeSpell_Struct* memspell = (const MemorizeSpell_Struct*) app->pBuffer;
if(!IsValidSpell(memspell->spell_id))
{
Message(Chat::Red, "Unexpected error: spell id out of range");
return;
}
if
(
GetClass() > 16 ||
GetLevel() < spells[memspell->spell_id].classes[GetClass()-1]
)
{
char val1[20]={0};
MessageString(Chat::Red,SPELL_LEVEL_TO_LOW,ConvertArray(spells[memspell->spell_id].classes[GetClass()-1],val1),spells[memspell->spell_id].name);
//Message(Chat::Red, "Unexpected error: Class cant use this spell at your level!");
return;
}
switch(memspell->scribing)
{
case memSpellScribing: { // scribing spell to book
const EQ::ItemInstance* inst = m_inv[EQ::invslot::slotCursor];
if (inst && inst->IsClassCommon())
{
const EQ::ItemData* item = inst->GetItem();
if (RuleB(Character, RestrictSpellScribing) && !item->IsEquipable(GetRace(), GetClass())) {
MessageString(Chat::Red, CANNOT_USE_ITEM);
break;
}
if(item && item->Scroll.Effect == (int32)(memspell->spell_id))
{
ScribeSpell(memspell->spell_id, memspell->slot);
DeleteItemInInventory(EQ::invslot::slotCursor, 1, true);
}
else
Message(0,"Scribing spell: inst exists but item does not or spell ids do not match.");
}
else
Message(0,"Scribing a spell without an inst on your cursor?");
break;
}
case memSpellMemorize: { // memming spell
if(HasSpellScribed(memspell->spell_id))
{
MemSpell(memspell->spell_id, memspell->slot);
}
else
{
database.SetMQDetectionFlag(AccountName(), GetName(), "OP_MemorizeSpell but we don't have this spell scribed...", zone->GetShortName());
}
break;
}
case memSpellForget: { // unmemming spell
UnmemSpell(memspell->slot);
break;
}
}
Save();
}
void Client::CancelSneakHide()
{
if (hidden || improved_hidden) {
auto app = new EQApplicationPacket(OP_CancelSneakHide, 0);
FastQueuePacket(&app);
// SoF and Tit send back a OP_SpawnAppearance turning off AT_Invis
// so we need to handle our sneaking flag only
// The later clients send back a OP_Hide (this has a size but data is 0)
// as well as OP_SpawnAppearance with AT_Invis and one with AT_Sneak
// So we don't have to handle any of those flags
if (ClientVersionBit() & EQ::versions::maskSoFAndEarlier)
sneaking = false;
}
}
void Client::BreakInvis()
{
if (invisible)
{
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
invisible = false;
invisible_undead = false;
invisible_animals = false;
hidden = false;
improved_hidden = false;
}
}
static uint64 CoinTypeCoppers(uint32 type) {
switch(type) {
case COINTYPE_PP:
return(1000);
case COINTYPE_GP:
return(100);
case COINTYPE_SP:
return(10);
case COINTYPE_CP:
default:
break;
}
return(1);
}
void Client::OPMoveCoin(const EQApplicationPacket* app)
{
MoveCoin_Struct* mc = (MoveCoin_Struct*)app->pBuffer;
uint64 value = 0, amount_to_take = 0, amount_to_add = 0;
int32 *from_bucket = 0, *to_bucket = 0;
Mob* trader = trade->With();
// if amount < 0, client is sending a malicious packet
if (mc->amount < 0)
{
return;
}
// could just do a range, but this is clearer and explicit
if
(
(
mc->cointype1 != COINTYPE_PP &&
mc->cointype1 != COINTYPE_GP &&
mc->cointype1 != COINTYPE_SP &&
mc->cointype1 != COINTYPE_CP
) ||
(
mc->cointype2 != COINTYPE_PP &&
mc->cointype2 != COINTYPE_GP &&
mc->cointype2 != COINTYPE_SP &&
mc->cointype2 != COINTYPE_CP
)
)
{
return;
}
switch(mc->from_slot)
{
case -1: // destroy
{
// I don't think you can move coin from the void,
// but need to check this
break;
}
case 0: // cursor
{
switch(mc->cointype1)
{
case COINTYPE_PP:
from_bucket = (int32 *) &m_pp.platinum_cursor; break;
case COINTYPE_GP:
from_bucket = (int32 *) &m_pp.gold_cursor; break;
case COINTYPE_SP:
from_bucket = (int32 *) &m_pp.silver_cursor; break;
case COINTYPE_CP:
from_bucket = (int32 *) &m_pp.copper_cursor; break;
}
break;
}
case 1: // inventory
{
switch(mc->cointype1)
{
case COINTYPE_PP:
from_bucket = (int32 *) &m_pp.platinum; break;
case COINTYPE_GP:
from_bucket = (int32 *) &m_pp.gold; break;
case COINTYPE_SP:
from_bucket = (int32 *) &m_pp.silver; break;
case COINTYPE_CP:
from_bucket = (int32 *) &m_pp.copper; break;
}
break;
}
case 2: // bank
{
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if(!banker || distance > USE_NPC_RANGE2)
{
auto hacked_string = fmt::format("Player tried to make use of a banker(coin move) but "
"{} is non-existant or too far away ({} units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
return;
}
switch(mc->cointype1)
{
case COINTYPE_PP:
from_bucket = (int32 *) &m_pp.platinum_bank; break;
case COINTYPE_GP:
from_bucket = (int32 *) &m_pp.gold_bank; break;
case COINTYPE_SP:
from_bucket = (int32 *) &m_pp.silver_bank; break;
case COINTYPE_CP:
from_bucket = (int32 *) &m_pp.copper_bank; break;
}
break;
}
case 3: // trade
{
// can't move coin from trade
break;
}
case 4: // shared bank
{
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if(!banker || distance > USE_NPC_RANGE2)
{
auto hacked_string =
fmt::format("Player tried to make use of a banker(shared coin move) but {} is "
"non-existant or too far away ({} units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
return;
}
if(mc->cointype1 == COINTYPE_PP) // there's only platinum here
from_bucket = (int32 *) &m_pp.platinum_shared;
break;
}
}
switch(mc->to_slot)
{
case -1: // destroy
{
// no action required
break;
}
case 0: // cursor
{
switch(mc->cointype2)
{
case COINTYPE_PP:
to_bucket = (int32 *) &m_pp.platinum_cursor; break;
case COINTYPE_GP:
to_bucket = (int32 *) &m_pp.gold_cursor; break;
case COINTYPE_SP:
to_bucket = (int32 *) &m_pp.silver_cursor; break;
case COINTYPE_CP:
to_bucket = (int32 *) &m_pp.copper_cursor; break;
}
break;
}
case 1: // inventory
{
switch(mc->cointype2)
{
case COINTYPE_PP:
to_bucket = (int32 *) &m_pp.platinum; break;
case COINTYPE_GP:
to_bucket = (int32 *) &m_pp.gold; break;
case COINTYPE_SP:
to_bucket = (int32 *) &m_pp.silver; break;
case COINTYPE_CP:
to_bucket = (int32 *) &m_pp.copper; break;
}
break;
}
case 2: // bank
{
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if(!banker || distance > USE_NPC_RANGE2)
{
auto hacked_string = fmt::format("Player tried to make use of a banker(coin move) but "
"{} is non-existant or too far away ({} units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
return;
}
switch(mc->cointype2)
{
case COINTYPE_PP:
to_bucket = (int32 *) &m_pp.platinum_bank; break;
case COINTYPE_GP:
to_bucket = (int32 *) &m_pp.gold_bank; break;
case COINTYPE_SP:
to_bucket = (int32 *) &m_pp.silver_bank; break;
case COINTYPE_CP:
to_bucket = (int32 *) &m_pp.copper_bank; break;
}
break;
}
case 3: // trade
{
if(trader)
{
switch(mc->cointype2)
{
case COINTYPE_PP:
to_bucket = (int32 *) &trade->pp; break;
case COINTYPE_GP:
to_bucket = (int32 *) &trade->gp; break;
case COINTYPE_SP:
to_bucket = (int32 *) &trade->sp; break;
case COINTYPE_CP:
to_bucket = (int32 *) &trade->cp; break;
}
}
break;
}
case 4: // shared bank
{
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if(!banker || distance > USE_NPC_RANGE2)
{
auto hacked_string =
fmt::format("Player tried to make use of a banker(shared coin move) but {} is "
"non-existant or too far away ({} units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
return;
}
if(mc->cointype2 == COINTYPE_PP) // there's only platinum here
to_bucket = (int32 *) &m_pp.platinum_shared;
break;
}
}
if(!from_bucket)
{
return;
}
// don't allow them to go into negatives (from our point of view)
amount_to_take = *from_bucket < mc->amount ? *from_bucket : mc->amount;
// if you move 11 gold into a bank platinum location, the packet
// will say 11, but the client will have 1 left on their cursor, so we have
// to figure out the conversion ourselves
amount_to_add = amount_to_take * ((float)CoinTypeCoppers(mc->cointype1) / (float)CoinTypeCoppers(mc->cointype2));
// the amount we're adding could be different than what was requested, so
// we have to adjust the amount we take as well
amount_to_take = amount_to_add * ((float)CoinTypeCoppers(mc->cointype2) / (float)CoinTypeCoppers(mc->cointype1));
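	// Editor's note -- a worked instance of the two conversions above (illustrative, not original source):
	// moving 11 gold (CoinTypeCoppers(GP) = 100) onto a platinum slot (CoinTypeCoppers(PP) = 1000)
	// gives amount_to_add = 11 * (100/1000) = 1.1, truncated to 1 platinum, and then
	// amount_to_take = 1 * (1000/100) = 10 gold actually removed -- the leftover 1 gold stays on
	// the cursor, exactly as the comment above describes.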
// now we should have a from_bucket, a to_bucket, an amount_to_take
// and an amount_to_add
// now we actually take it from the from bucket. if there's an error
// with the destination slot, they lose their money
*from_bucket -= amount_to_take;
	// why are we intentionally inducing a crash here rather than letting the code attempt to stumble on?
// assert(*from_bucket >= 0);
if(to_bucket)
{
if(*to_bucket + amount_to_add > *to_bucket) // overflow check
*to_bucket += amount_to_add;
//shared bank plat
if (RuleB(Character, SharedBankPlat))
{
if (to_bucket == &m_pp.platinum_shared || from_bucket == &m_pp.platinum_shared)
{
if (from_bucket == &m_pp.platinum_shared)
amount_to_add = 0 - amount_to_take;
database.SetSharedPlatinum(AccountID(),amount_to_add);
}
}
else{
if (to_bucket == &m_pp.platinum_shared || from_bucket == &m_pp.platinum_shared){
this->SendPopupToClient(
"Shared Bank Warning",
"<c \"#F62217\">::: WARNING! :::<br>"
"SHARED BANK IS DISABLED AND YOUR PLATINUM WILL BE DESTROYED IF YOU PUT IT HERE!</c>"
);
this->Message(Chat::Red, "::: WARNING! ::: SHARED BANK IS DISABLED AND YOUR PLATINUM WILL BE DESTROYED IF YOU PUT IT HERE!");
}
}
}
// if this is a trade move, inform the person being traded with
if(mc->to_slot == 3 && trader && trader->IsClient())
{
// If one party accepted the trade then some coin was added, their state needs to be reset
trade->state = Trading;
Mob* with = trade->With();
if (with)
with->trade->state = Trading;
Client* recipient = trader->CastToClient();
recipient->Message(Chat::Yellow, "%s adds some coins to the trade.", GetName());
recipient->Message(Chat::Yellow, "The total trade is: %i PP, %i GP, %i SP, %i CP",
trade->pp, trade->gp,
trade->sp, trade->cp
);
auto outapp = new EQApplicationPacket(OP_TradeCoins, sizeof(TradeCoin_Struct));
TradeCoin_Struct* tcs = (TradeCoin_Struct*)outapp->pBuffer;
tcs->trader = trader->GetID();
tcs->slot = mc->cointype2;
tcs->unknown5 = 0x4fD2;
tcs->unknown7 = 0;
tcs->amount = amount_to_add;
recipient->QueuePacket(outapp);
safe_delete(outapp);
}
SaveCurrency();
}
void Client::OPGMTraining(const EQApplicationPacket *app)
{
EQApplicationPacket* outapp = app->Copy();
GMTrainee_Struct* gmtrain = (GMTrainee_Struct*) outapp->pBuffer;
Mob* pTrainer = entity_list.GetMob(gmtrain->npcid);
if(!pTrainer || !pTrainer->IsNPC() || pTrainer->GetClass() < WARRIORGM || pTrainer->GetClass() > BERSERKERGM)
return;
//you can only use your own trainer, client enforces this, but why trust it
if (!RuleB(Character, AllowCrossClassTrainers)) {
int trains_class = pTrainer->GetClass() - (WARRIORGM - WARRIOR);
if (GetClass() != trains_class)
return;
}
//you have to be somewhat close to a trainer to be properly using them
if(DistanceSquared(m_Position,pTrainer->GetPosition()) > USE_NPC_RANGE2)
return;
// if this for-loop acts up again (crashes linux), try enabling the before and after #pragmas
//#pragma GCC push_options
//#pragma GCC optimize ("O0")
for (int sk = EQ::skills::Skill1HBlunt; sk <= EQ::skills::HIGHEST_SKILL; ++sk) {
if (sk == EQ::skills::SkillTinkering && GetRace() != GNOME) {
gmtrain->skills[sk] = 0; //Non gnomes can't tinker!
} else {
gmtrain->skills[sk] = GetMaxSkillAfterSpecializationRules((EQ::skills::SkillType)sk, MaxSkill((EQ::skills::SkillType)sk, GetClass(), RuleI(Character, MaxLevel)));
//this is the highest level that the trainer can train you to, this is enforced clientside so we can't just
			//Set it to 1 with CanHaveSkill or you won't be able to train past 1.
}
}
if (ClientVersion() < EQ::versions::ClientVersion::RoF2 && GetClass() == BERSERKER) {
gmtrain->skills[EQ::skills::Skill1HPiercing] = gmtrain->skills[EQ::skills::Skill2HPiercing];
gmtrain->skills[EQ::skills::Skill2HPiercing] = 0;
}
//#pragma GCC pop_options
uchar ending[]={0x34,0x87,0x8a,0x3F,0x01
,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9
,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9,0xC9
,0x76,0x75,0x3f};
memcpy(&outapp->pBuffer[outapp->size-40],ending,sizeof(ending));
FastQueuePacket(&outapp);
// welcome message
if (pTrainer && pTrainer->IsNPC())
{
pTrainer->SayString(zone->random.Int(1204, 1207), GetCleanName());
}
}
void Client::OPGMEndTraining(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_GMEndTrainingResponse, 0);
GMTrainEnd_Struct *p = (GMTrainEnd_Struct *)app->pBuffer;
FastQueuePacket(&outapp);
Mob* pTrainer = entity_list.GetMob(p->npcid);
if(!pTrainer || !pTrainer->IsNPC() || pTrainer->GetClass() < WARRIORGM || pTrainer->GetClass() > BERSERKERGM)
return;
//you can only use your own trainer, client enforces this, but why trust it
if (!RuleB(Character, AllowCrossClassTrainers)) {
int trains_class = pTrainer->GetClass() - (WARRIORGM - WARRIOR);
if (GetClass() != trains_class)
return;
}
//you have to be somewhat close to a trainer to be properly using them
if(DistanceSquared(m_Position, pTrainer->GetPosition()) > USE_NPC_RANGE2)
return;
// goodbye message
if (pTrainer->IsNPC())
{
pTrainer->SayString(zone->random.Int(1208, 1211), GetCleanName());
}
}
void Client::OPGMTrainSkill(const EQApplicationPacket *app)
{
if(!m_pp.points)
return;
int Cost = 0;
GMSkillChange_Struct* gmskill = (GMSkillChange_Struct*) app->pBuffer;
Mob* pTrainer = entity_list.GetMob(gmskill->npcid);
if(!pTrainer || !pTrainer->IsNPC() || pTrainer->GetClass() < WARRIORGM || pTrainer->GetClass() > BERSERKERGM)
return;
//you can only use your own trainer, client enforces this, but why trust it
if (!RuleB(Character, AllowCrossClassTrainers)) {
int trains_class = pTrainer->GetClass() - (WARRIORGM - WARRIOR);
if (GetClass() != trains_class)
return;
}
//you have to be somewhat close to a trainer to be properly using them
if(DistanceSquared(m_Position, pTrainer->GetPosition()) > USE_NPC_RANGE2)
return;
if (gmskill->skillbank == 0x01)
{
// languages go here
if (gmskill->skill_id > 25)
{
std::cout << "Wrong Training Skill (languages)" << std::endl;
DumpPacket(app);
return;
}
int AdjustedSkillLevel = GetLanguageSkill(gmskill->skill_id) - 10;
if(AdjustedSkillLevel > 0)
Cost = AdjustedSkillLevel * AdjustedSkillLevel * AdjustedSkillLevel / 100;
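		// Worked example (editor's note): a language skill of 35 gives AdjustedSkillLevel = 25,
		// so Cost = 25 * 25 * 25 / 100 = 156 after integer truncation; at skill 10 or below the
		// adjusted level is not positive and the training point costs no money at all.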
IncreaseLanguageSkill(gmskill->skill_id);
}
else if (gmskill->skillbank == 0x00)
{
// normal skills go here
if (gmskill->skill_id > EQ::skills::HIGHEST_SKILL)
{
std::cout << "Wrong Training Skill (abilities)" << std::endl;
DumpPacket(app);
return;
}
EQ::skills::SkillType skill = (EQ::skills::SkillType)gmskill->skill_id;
if(!CanHaveSkill(skill)) {
LogSkills("Tried to train skill [{}], which is not allowed", skill);
return;
}
if(MaxSkill(skill) == 0) {
LogSkills("Tried to train skill [{}], but training is not allowed at this level", skill);
return;
}
uint16 skilllevel = GetRawSkill(skill);
if(skilllevel == 0) {
//this is a new skill..
uint16 t_level = SkillTrainLevel(skill, GetClass());
if (t_level == 0)
{
return;
}
SetSkill(skill, t_level);
} else {
switch(skill) {
case EQ::skills::SkillBrewing:
case EQ::skills::SkillMakePoison:
case EQ::skills::SkillTinkering:
case EQ::skills::SkillResearch:
case EQ::skills::SkillAlchemy:
case EQ::skills::SkillBaking:
case EQ::skills::SkillTailoring:
case EQ::skills::SkillBlacksmithing:
case EQ::skills::SkillFletching:
case EQ::skills::SkillJewelryMaking:
case EQ::skills::SkillPottery:
if(skilllevel >= RuleI(Skills, MaxTrainTradeskills)) {
MessageString(Chat::Red, MORE_SKILLED_THAN_I, pTrainer->GetCleanName());
return;
}
break;
case EQ::skills::SkillSpecializeAbjure:
case EQ::skills::SkillSpecializeAlteration:
case EQ::skills::SkillSpecializeConjuration:
case EQ::skills::SkillSpecializeDivination:
case EQ::skills::SkillSpecializeEvocation:
if(skilllevel >= RuleI(Skills, MaxTrainSpecializations)) {
MessageString(Chat::Red, MORE_SKILLED_THAN_I, pTrainer->GetCleanName());
return;
}
default:
break;
}
int MaxSkillValue = MaxSkill(skill);
if (skilllevel >= MaxSkillValue)
{
// Don't allow training over max skill level
MessageString(Chat::Red, MORE_SKILLED_THAN_I, pTrainer->GetCleanName());
return;
}
if (gmskill->skill_id >= EQ::skills::SkillSpecializeAbjure && gmskill->skill_id <= EQ::skills::SkillSpecializeEvocation)
{
int MaxSpecSkill = GetMaxSkillAfterSpecializationRules(skill, MaxSkillValue);
if (skilllevel >= MaxSpecSkill)
{
// Restrict specialization training to follow the rules
MessageString(Chat::Red, MORE_SKILLED_THAN_I, pTrainer->GetCleanName());
return;
}
}
// Client train a valid skill
//
int AdjustedSkillLevel = skilllevel - 10;
if(AdjustedSkillLevel > 0)
Cost = AdjustedSkillLevel * AdjustedSkillLevel * AdjustedSkillLevel / 100;
SetSkill(skill, skilllevel + 1);
}
}
if (ClientVersion() >= EQ::versions::ClientVersion::SoF) {
// The following packet decreases the skill points left in the Training Window and
// produces the 'You have increased your skill / learned the basics of' message.
//
auto outapp = new EQApplicationPacket(OP_GMTrainSkillConfirm, sizeof(GMTrainSkillConfirm_Struct));
GMTrainSkillConfirm_Struct *gmtsc = (GMTrainSkillConfirm_Struct *)outapp->pBuffer;
gmtsc->SkillID = gmskill->skill_id;
if(gmskill->skillbank == 1) {
gmtsc->NewSkill = (GetLanguageSkill(gmtsc->SkillID) == 1);
gmtsc->SkillID += 100;
}
else
gmtsc->NewSkill = (GetRawSkill((EQ::skills::SkillType)gmtsc->SkillID) == 1);
gmtsc->Cost = Cost;
strcpy(gmtsc->TrainerName, pTrainer->GetCleanName());
QueuePacket(outapp);
safe_delete(outapp);
}
if(Cost)
TakeMoneyFromPP(Cost);
m_pp.points--;
}
// this is used for /summon and /corpse
void Client::OPGMSummon(const EQApplicationPacket *app)
{
GMSummon_Struct* gms = (GMSummon_Struct*) app->pBuffer;
Mob* st = entity_list.GetMob(gms->charname);
if(st && st->IsCorpse())
{
st->CastToCorpse()->Summon(this, false, true);
}
else
{
if(admin < 80)
{
return;
}
if(st)
{
Message(0, "Local: Summoning %s to %f, %f, %f", gms->charname, gms->x, gms->y, gms->z);
if (st->IsClient() && (st->CastToClient()->GetAnon() != 1 || this->Admin() >= st->CastToClient()->Admin()))
st->CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), (float)gms->x, (float)gms->y, (float)gms->z, this->GetHeading(), true);
else
st->GMMove(this->GetX(), this->GetY(), this->GetZ(),this->GetHeading());
}
else
{
uint8 tmp = gms->charname[strlen(gms->charname)-1];
if (!worldserver.Connected())
{
Message(0, "Error: World server disconnected");
}
			else if (tmp < '0' || tmp > '9') // don't send to world if it's not a player's name
{
auto pack = new ServerPacket(ServerOP_ZonePlayer, sizeof(ServerZonePlayer_Struct));
ServerZonePlayer_Struct* szp = (ServerZonePlayer_Struct*) pack->pBuffer;
strcpy(szp->adminname, this->GetName());
szp->adminrank = this->Admin();
strcpy(szp->name, gms->charname);
strcpy(szp->zone, zone->GetShortName());
szp->x_pos = (float)gms->x;
szp->y_pos = (float)gms->y;
szp->z_pos = (float)gms->z;
szp->ignorerestrictions = 2;
worldserver.SendPacket(pack);
safe_delete(pack);
}
else {
//all options have been exhausted
//summon our target...
if(GetTarget() && GetTarget()->IsCorpse()){
GetTarget()->CastToCorpse()->Summon(this, false, true);
}
}
}
}
}
void Client::DoHPRegen() {
SetHP(GetHP() + CalcHPRegen());
SendHPUpdate();
}
void Client::DoManaRegen() {
if (GetMana() >= max_mana && spellbonuses.ManaRegen >= 0)
return;
if (GetMana() < max_mana && (IsSitting() || CanMedOnHorse()) && HasSkill(EQ::skills::SkillMeditate))
CheckIncreaseSkill(EQ::skills::SkillMeditate, nullptr, -5);
SetMana(GetMana() + CalcManaRegen());
CheckManaEndUpdate();
}
void Client::DoStaminaHungerUpdate()
{
auto outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct *sta = (Stamina_Struct *)outapp->pBuffer;
LogFood("Client::DoStaminaHungerUpdate() hunger_level: [{}] thirst_level: [{}] before loss", m_pp.hunger_level, m_pp.thirst_level);
if (zone->GetZoneID() != 151 && !GetGM()) {
int loss = RuleI(Character, FoodLossPerUpdate);
if (GetHorseId() != 0)
loss *= 3;
m_pp.hunger_level = EQ::Clamp(m_pp.hunger_level - loss, 0, 6000);
m_pp.thirst_level = EQ::Clamp(m_pp.thirst_level - loss, 0, 6000);
if (spellbonuses.hunger) {
m_pp.hunger_level = EQ::ClampLower(m_pp.hunger_level, 3500);
m_pp.thirst_level = EQ::ClampLower(m_pp.thirst_level, 3500);
}
sta->food = m_pp.hunger_level;
sta->water = m_pp.thirst_level;
} else {
// No auto food/drink consumption in the Bazaar
sta->food = 6000;
sta->water = 6000;
}
LogFood("Client::DoStaminaHungerUpdate() Current hunger_level: [{}] = ([{}] minutes left) thirst_level: [{}] = ([{}] minutes left) - after loss",
m_pp.hunger_level, m_pp.hunger_level, m_pp.thirst_level, m_pp.thirst_level);
FastQueuePacket(&outapp);
}
void Client::DoEnduranceRegen()
{
// endurance has some negative mods that could result in a negative regen when starved
int regen = CalcEnduranceRegen();
if (regen < 0 || (regen > 0 && GetEndurance() < GetMaxEndurance()))
SetEndurance(GetEndurance() + regen);
}
void Client::DoEnduranceUpkeep() {
if (!HasEndurUpkeep())
return;
int upkeep_sum = 0;
int cost_redux = spellbonuses.EnduranceReduction + itembonuses.EnduranceReduction + aabonuses.EnduranceReduction;
bool has_effect = false;
uint32 buffs_i;
uint32 buff_count = GetMaxTotalSlots();
for (buffs_i = 0; buffs_i < buff_count; buffs_i++) {
if (buffs[buffs_i].spellid != SPELL_UNKNOWN) {
int upkeep = spells[buffs[buffs_i].spellid].EndurUpkeep;
if(upkeep > 0) {
has_effect = true;
if(cost_redux > 0) {
if(upkeep <= cost_redux)
continue; //reduced to 0
upkeep -= cost_redux;
}
if((upkeep+upkeep_sum) > GetEndurance()) {
//they do not have enough to keep this one going.
BuffFadeBySlot(buffs_i);
} else {
upkeep_sum += upkeep;
}
}
}
}
if(upkeep_sum != 0){
SetEndurance(GetEndurance() - upkeep_sum);
TryTriggerOnCastRequirement();
}
if (!has_effect)
SetEndurUpkeep(false);
}
void Client::CalcRestState()
{
// This method calculates rest state HP and mana regeneration.
// The client must have been out of combat for RuleI(Character, RestRegenTimeToActivate) seconds,
// must be sitting down, and must not have any detrimental spells affecting them.
if(!RuleB(Character, RestRegenEnabled))
return;
ooc_regen = false;
if(AggroCount || !(IsSitting() || CanMedOnHorse()))
return;
if(!rest_timer.Check(false))
return;
// so we don't have aggro, our timer has expired, we do not want this to cause issues
m_pp.RestTimer = 0;
uint32 buff_count = GetMaxTotalSlots();
for (unsigned int j = 0; j < buff_count; j++) {
if(buffs[j].spellid != SPELL_UNKNOWN) {
if(IsDetrimentalSpell(buffs[j].spellid) && (buffs[j].ticsremaining > 0))
if(!DetrimentalSpellAllowsRest(buffs[j].spellid))
return;
}
}
ooc_regen = true;
}
void Client::DoTracking()
{
if (TrackingID == 0)
return;
Mob *m = entity_list.GetMob(TrackingID);
if (!m || m->IsCorpse()) {
MessageString(Chat::Skills, TRACK_LOST_TARGET);
TrackingID = 0;
return;
}
float RelativeHeading = GetHeading() - CalculateHeadingToTarget(m->GetX(), m->GetY());
if (RelativeHeading < 0)
RelativeHeading += 512;
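	// Editor's note: headings here wrap on a 0-511 scale rather than 0-359 degrees (hence the
	// += 512 above), and each 64-unit band below maps the relative heading onto one of eight
	// compass sectors, with "straight ahead" split across the wrap-around (> 480 and 0-32).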
if (RelativeHeading > 480)
MessageString(Chat::Skills, TRACK_STRAIGHT_AHEAD, m->GetCleanName());
else if (RelativeHeading > 416)
MessageString(Chat::Skills, TRACK_AHEAD_AND_TO, m->GetCleanName(), "left");
else if (RelativeHeading > 352)
MessageString(Chat::Skills, TRACK_TO_THE, m->GetCleanName(), "left");
else if (RelativeHeading > 288)
MessageString(Chat::Skills, TRACK_BEHIND_AND_TO, m->GetCleanName(), "left");
else if (RelativeHeading > 224)
MessageString(Chat::Skills, TRACK_BEHIND_YOU, m->GetCleanName());
else if (RelativeHeading > 160)
MessageString(Chat::Skills, TRACK_BEHIND_AND_TO, m->GetCleanName(), "right");
else if (RelativeHeading > 96)
MessageString(Chat::Skills, TRACK_TO_THE, m->GetCleanName(), "right");
else if (RelativeHeading > 32)
MessageString(Chat::Skills, TRACK_AHEAD_AND_TO, m->GetCleanName(), "right");
else if (RelativeHeading >= 0)
MessageString(Chat::Skills, TRACK_STRAIGHT_AHEAD, m->GetCleanName());
}
void Client::HandleRespawnFromHover(uint32 Option)
{
RespawnFromHoverTimer.Disable();
RespawnOption* chosen = nullptr;
bool is_rez = false;
//Find the selected option
if (Option == 0)
{
chosen = &respawn_options.front();
}
else if (Option == (respawn_options.size() - 1))
{
chosen = &respawn_options.back();
is_rez = true; //Rez must always be the last option
}
else
{
std::list<RespawnOption>::iterator itr;
uint32 pos = 0;
for (itr = respawn_options.begin(); itr != respawn_options.end(); ++itr)
{
if (pos++ == Option)
{
chosen = &(*itr);
break;
}
}
}
//If they somehow chose an option they don't have, just send them to bind
RespawnOption* default_to_bind = nullptr;
if (!chosen)
{
/* put error logging here */
BindStruct* b = &m_pp.binds[0];
default_to_bind = new RespawnOption;
default_to_bind->name = "Bind Location";
default_to_bind->zone_id = b->zone_id;
default_to_bind->instance_id = b->instance_id;
default_to_bind->x = b->x;
default_to_bind->y = b->y;
default_to_bind->z = b->z;
default_to_bind->heading = b->heading;
chosen = default_to_bind;
is_rez = false;
}
if (chosen->zone_id == zone->GetZoneID() && chosen->instance_id == zone->GetInstanceID()) //If they should respawn in the current zone...
{
if (is_rez)
{
if (PendingRezzXP < 0 || PendingRezzSpellID == 0)
{
LogSpells("Unexpected Rezz from hover request");
return;
}
SetHP(GetMaxHP() / 5);
Corpse* corpse = entity_list.GetCorpseByName(PendingRezzCorpseName.c_str());
if (corpse)
{
m_Position.x = corpse->GetX();
m_Position.y = corpse->GetY();
m_Position.z = corpse->GetZ();
}
auto outapp =
new EQApplicationPacket(OP_ZonePlayerToBind, sizeof(ZonePlayerToBind_Struct) + 10);
ZonePlayerToBind_Struct* gmg = (ZonePlayerToBind_Struct*) outapp->pBuffer;
gmg->bind_zone_id = zone->GetZoneID();
gmg->bind_instance_id = zone->GetInstanceID();
gmg->x = GetX();
gmg->y = GetY();
gmg->z = GetZ();
gmg->heading = GetHeading();
strcpy(gmg->zone_name, "Resurrect");
FastQueuePacket(&outapp);
ClearHover();
SendHPUpdate();
OPRezzAnswer(1, PendingRezzSpellID, zone->GetZoneID(), zone->GetInstanceID(), GetX(), GetY(), GetZ());
if (corpse && corpse->IsCorpse())
{
LogSpells("Hover Rez in zone [{}] for corpse [{}]",
zone->GetShortName(), PendingRezzCorpseName.c_str());
LogSpells("Found corpse. Marking corpse as rezzed");
corpse->IsRezzed(true);
corpse->CompleteResurrection();
}
}
else //Not rez
{
PendingRezzSpellID = 0;
auto outapp = new EQApplicationPacket(OP_ZonePlayerToBind, sizeof(ZonePlayerToBind_Struct) +
chosen->name.length() + 1);
ZonePlayerToBind_Struct* gmg = (ZonePlayerToBind_Struct*) outapp->pBuffer;
gmg->bind_zone_id = zone->GetZoneID();
gmg->bind_instance_id = chosen->instance_id;
gmg->x = chosen->x;
gmg->y = chosen->y;
gmg->z = chosen->z;
gmg->heading = chosen->heading;
strcpy(gmg->zone_name, chosen->name.c_str());
FastQueuePacket(&outapp);
CalcBonuses();
SetHP(GetMaxHP());
SetMana(GetMaxMana());
SetEndurance(GetMaxEndurance());
m_Position.x = chosen->x;
m_Position.y = chosen->y;
m_Position.z = chosen->z;
m_Position.w = chosen->heading;
ClearHover();
entity_list.RefreshClientXTargets(this);
SendHPUpdate();
}
//After they've respawned into the same zone, trigger EVENT_RESPAWN
parse->EventPlayer(EVENT_RESPAWN, this, static_cast<std::string>(itoa(Option)), is_rez ? 1 : 0);
//Pop Rez option from the respawn options list;
//easiest way to make sure it stays at the end and
//doesn't disrupt adding/removing scripted options
respawn_options.pop_back();
}
else
{
//Heading to a different zone
if(isgrouped)
{
Group *g = GetGroup();
if(g)
g->MemberZoned(this);
}
Raid* r = entity_list.GetRaidByClient(this);
if(r)
r->MemberZoned(this);
m_pp.zone_id = chosen->zone_id;
m_pp.zoneInstance = chosen->instance_id;
database.MoveCharacterToZone(CharacterID(), chosen->zone_id);
Save();
MovePC(chosen->zone_id, chosen->instance_id, chosen->x, chosen->y, chosen->z, chosen->heading, 1);
}
safe_delete(default_to_bind);
}
void Client::ClearHover()
{
// Our Entity ID is currently zero, set in Client::Death
SetID(entity_list.GetFreeID());
auto outapp = new EQApplicationPacket(OP_ZoneEntry, sizeof(ServerZoneEntry_Struct));
ServerZoneEntry_Struct* sze = (ServerZoneEntry_Struct*)outapp->pBuffer;
FillSpawnStruct(&sze->player,CastToMob());
sze->player.spawn.NPC = 0;
sze->player.spawn.z += 6; //arbitrary lift, seems to help spawning under zone.
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
if (IsClient() && CastToClient()->ClientVersionBit() & EQ::versions::maskUFAndLater)
{
EQApplicationPacket *outapp = MakeBuffsPacket(false);
CastToClient()->FastQueuePacket(&outapp);
}
dead = false;
}
void Client::HandleLFGuildResponse(ServerPacket *pack)
{
pack->SetReadPosition(8);
char Tmp[257];
pack->ReadString(Tmp);
pack->ReadSkipBytes(4);
uint32 SubType, NumberOfMatches;
SubType = pack->ReadUInt32();
switch(SubType)
{
case QSG_LFGuild_PlayerMatches:
{
NumberOfMatches = pack->ReadUInt32();
uint32 StartOfMatches = pack->GetReadPosition();
uint32 i = NumberOfMatches;
uint32 PacketSize = 12;
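			// Editor's note: this first loop only measures the reply. The 12-byte base covers the
			// three uint32 header fields written below (the command, the 0xeb63 marker and the
			// match count); each match then contributes its two null-terminated strings plus
			// 16 bytes of numeric fields, mirroring the second loop that does the actual writes.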
while(i > 0)
{
pack->ReadString(Tmp);
PacketSize += strlen(Tmp) + 1;
pack->ReadString(Tmp);
PacketSize += strlen(Tmp) + 1;
PacketSize += 16;
pack->ReadSkipBytes(16);
--i;
}
auto outapp = new EQApplicationPacket(OP_LFGuild, PacketSize);
outapp->WriteUInt32(3);
outapp->WriteUInt32(0xeb63); // Don't know the significance of this value.
outapp->WriteUInt32(NumberOfMatches);
pack->SetReadPosition(StartOfMatches);
while(NumberOfMatches > 0)
{
pack->ReadString(Tmp);
outapp->WriteString(Tmp);
pack->ReadString(Tmp);
uint32 Level = pack->ReadUInt32();
uint32 Class = pack->ReadUInt32();
uint32 AACount = pack->ReadUInt32();
uint32 TimeZone = pack->ReadUInt32();
outapp->WriteUInt32(Level);
outapp->WriteUInt32(Class);
outapp->WriteUInt32(AACount);
outapp->WriteUInt32(TimeZone);
outapp->WriteString(Tmp);
--NumberOfMatches;
}
FastQueuePacket(&outapp);
break;
}
case QSG_LFGuild_RequestPlayerInfo:
{
auto outapp = new EQApplicationPacket(OP_LFGuild, sizeof(LFGuild_PlayerToggle_Struct));
LFGuild_PlayerToggle_Struct *pts = (LFGuild_PlayerToggle_Struct *)outapp->pBuffer;
pts->Command = 0;
pack->ReadString(pts->Comment);
pts->TimeZone = pack->ReadUInt32();
pts->TimePosted = pack->ReadUInt32();
pts->Toggle = pack->ReadUInt32();
FastQueuePacket(&outapp);
break;
}
case QSG_LFGuild_GuildMatches:
{
NumberOfMatches = pack->ReadUInt32();
uint32 StartOfMatches = pack->GetReadPosition();
uint32 i = NumberOfMatches;
uint32 PacketSize = 12;
while(i > 0)
{
pack->ReadString(Tmp);
PacketSize += strlen(Tmp) + 1;
pack->ReadSkipBytes(4);
pack->ReadString(Tmp);
PacketSize += strlen(Tmp) + 1;
PacketSize += 4;
--i;
}
auto outapp = new EQApplicationPacket(OP_LFGuild, PacketSize);
outapp->WriteUInt32(4);
outapp->WriteUInt32(0xeb63);
outapp->WriteUInt32(NumberOfMatches);
pack->SetReadPosition(StartOfMatches);
while(NumberOfMatches > 0)
{
pack->ReadString(Tmp);
uint32 TimeZone = pack->ReadUInt32();
outapp->WriteString(Tmp);
outapp->WriteUInt32(TimeZone);
pack->ReadString(Tmp);
outapp->WriteString(Tmp);
--NumberOfMatches;
}
FastQueuePacket(&outapp);
break;
}
case QSG_LFGuild_RequestGuildInfo:
{
char Comments[257];
uint32 FromLevel, ToLevel, Classes, AACount, TimeZone, TimePosted;
pack->ReadString(Comments);
FromLevel = pack->ReadUInt32();
ToLevel = pack->ReadUInt32();
Classes = pack->ReadUInt32();
AACount = pack->ReadUInt32();
TimeZone = pack->ReadUInt32();
TimePosted = pack->ReadUInt32();
auto outapp = new EQApplicationPacket(OP_LFGuild, sizeof(LFGuild_GuildToggle_Struct));
LFGuild_GuildToggle_Struct *gts = (LFGuild_GuildToggle_Struct *)outapp->pBuffer;
gts->Command = 1;
strcpy(gts->Comment, Comments);
gts->FromLevel = FromLevel;
gts->ToLevel = ToLevel;
gts->Classes = Classes;
gts->AACount = AACount;
gts->TimeZone = TimeZone;
gts->Toggle = 1;
gts->TimePosted = TimePosted;
gts->Name[0] = 0;
FastQueuePacket(&outapp);
break;
}
default:
break;
}
}
void Client::SendLFGuildStatus()
{
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + 17);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_RequestPlayerInfo);
worldserver.SendPacket(pack);
safe_delete(pack);
}
void Client::SendGuildLFGuildStatus()
{
auto pack = new ServerPacket(ServerOP_QueryServGeneric,
		strlen(GetName()) + strlen(guild_mgr.GetGuildName(GuildID())) + 18);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_RequestGuildInfo);
pack->WriteString(guild_mgr.GetGuildName(GuildID()));
worldserver.SendPacket(pack);
safe_delete(pack);
}
| 1 | 11,021 | Shouldn't need this-> here. | EQEmu-Server | cpp |
@@ -1,4 +1,3 @@
-# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule,
bias_init_with_prob)
from mmcv.ops.nms import batched_nms
from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class YOLOXHead(BaseDenseHead, BBoxTestMixin):
"""YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels in stacking convs.
Default: 256
stacked_convs (int): Number of stacking convs of the head.
Default: 2.
strides (tuple): Downsample factor of each feature map.
        use_depthwise (bool): Whether to use depthwise separable convolutions in
            blocks. Default: False.
dcn_on_last_conv (bool): If true, use dcn in the last layer of
towers. Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Default: "auto".
conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_obj (dict): Config of objectness loss.
loss_l1 (dict): Config of L1 loss.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=2,
strides=[8, 16, 32],
use_depthwise=False,
dcn_on_last_conv=False,
conv_bias='auto',
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='sum',
loss_weight=1.0),
loss_bbox=dict(
type='IoULoss',
mode='square',
eps=1e-16,
reduction='sum',
loss_weight=5.0),
loss_obj=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='sum',
loss_weight=1.0),
loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0),
train_cfg=None,
test_cfg=None,
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super().__init__(init_cfg=init_cfg)
self.num_classes = num_classes
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.use_depthwise = use_depthwise
self.dcn_on_last_conv = dcn_on_last_conv
assert conv_bias == 'auto' or isinstance(conv_bias, bool)
self.conv_bias = conv_bias
self.use_sigmoid_cls = True
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_obj = build_loss(loss_obj)
self.use_l1 = False # This flag will be modified by hooks.
self.loss_l1 = build_loss(loss_l1)
self.prior_generator = MlvlPointGenerator(strides, offset=0)
self.test_cfg = test_cfg
self.train_cfg = train_cfg
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self._init_layers()
def _init_layers(self):
self.multi_level_cls_convs = nn.ModuleList()
self.multi_level_reg_convs = nn.ModuleList()
self.multi_level_conv_cls = nn.ModuleList()
self.multi_level_conv_reg = nn.ModuleList()
self.multi_level_conv_obj = nn.ModuleList()
for _ in self.strides:
self.multi_level_cls_convs.append(self._build_stacked_convs())
self.multi_level_reg_convs.append(self._build_stacked_convs())
conv_cls, conv_reg, conv_obj = self._build_predictor()
self.multi_level_conv_cls.append(conv_cls)
self.multi_level_conv_reg.append(conv_reg)
self.multi_level_conv_obj.append(conv_obj)
def _build_stacked_convs(self):
"""Initialize conv layers of a single level head."""
conv = DepthwiseSeparableConvModule \
if self.use_depthwise else ConvModule
stacked_convs = []
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
stacked_convs.append(
conv(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
bias=self.conv_bias))
return nn.Sequential(*stacked_convs)
def _build_predictor(self):
"""Initialize predictor layers of a single level head."""
conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
conv_reg = nn.Conv2d(self.feat_channels, 4, 1)
conv_obj = nn.Conv2d(self.feat_channels, 1, 1)
return conv_cls, conv_reg, conv_obj
def init_weights(self):
super(YOLOXHead, self).init_weights()
# Use prior in model initialization to improve stability
bias_init = bias_init_with_prob(0.01)
for conv_cls, conv_obj in zip(self.multi_level_conv_cls,
self.multi_level_conv_obj):
conv_cls.bias.data.fill_(bias_init)
conv_obj.bias.data.fill_(bias_init)
def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg,
conv_obj):
"""Forward feature of a single scale level."""
cls_feat = cls_convs(x)
reg_feat = reg_convs(x)
cls_score = conv_cls(cls_feat)
bbox_pred = conv_reg(reg_feat)
objectness = conv_obj(reg_feat)
return cls_score, bbox_pred, objectness
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            tuple[Tensor]: A tuple of multi-level prediction maps, each is a
4D-tensor of shape (batch_size, 5+num_classes, height, width).
"""
return multi_apply(self.forward_single, feats,
self.multi_level_cls_convs,
self.multi_level_reg_convs,
self.multi_level_conv_cls,
self.multi_level_conv_reg,
self.multi_level_conv_obj)
def get_bboxes(self,
cls_scores,
bbox_preds,
objectnesses,
img_metas=None,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network outputs of a batch into bbox results.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
img_metas (list[dict], Optional): Image meta info. Default None.
cfg (mmcv.Config, Optional): Test / postprocessing configuration,
if None, test_cfg would be used. Default None.
rescale (bool): If True, return boxes in original image space.
Default False.
with_nms (bool): If True, do nms before return boxes.
Default True.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class label of
the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds) == len(objectnesses)
cfg = self.test_cfg if cfg is None else cfg
scale_factors = [img_meta['scale_factor'] for img_meta in img_metas]
num_imgs = len(img_metas)
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, cls_scores[0].device, with_stride=True)
# flatten cls_scores, bbox_preds and objectness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
for objectness in objectnesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
flatten_priors = torch.cat(mlvl_priors)
flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
if rescale:
flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor(
scale_factors).unsqueeze(1)
result_list = []
for img_id in range(len(img_metas)):
cls_scores = flatten_cls_scores[img_id]
score_factor = flatten_objectness[img_id]
bboxes = flatten_bboxes[img_id]
result_list.append(
self._bboxes_nms(cls_scores, bboxes, score_factor, cfg))
return result_list
def _bbox_decode(self, priors, bbox_preds):
xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2]
whs = bbox_preds[..., 2:].exp() * priors[:, 2:]
tl_x = (xys[..., 0] - whs[..., 0] / 2)
tl_y = (xys[..., 1] - whs[..., 1] / 2)
br_x = (xys[..., 0] + whs[..., 0] / 2)
br_y = (xys[..., 1] + whs[..., 1] / 2)
decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
return decoded_bboxes
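    # Editor's note -- a small numeric sketch of the decoding above (the values are made up for
    # illustration): for a stride-8 prior stored as (16, 8, 8, 8), a raw prediction of
    # (0.5, 0.5, 0.0, 0.0) decodes to center (20, 12) and size (8, 8), i.e. the box
    # (16, 8, 24, 16) in (tl_x, tl_y, br_x, br_y) form.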
def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg):
max_scores, labels = torch.max(cls_scores, 1)
valid_mask = score_factor * max_scores >= cfg.score_thr
bboxes = bboxes[valid_mask]
scores = max_scores[valid_mask] * score_factor[valid_mask]
labels = labels[valid_mask]
if labels.numel() == 0:
return bboxes, labels
else:
dets, keep = batched_nms(bboxes, scores, labels, cfg.nms)
return dets, labels[keep]
def loss(self,
cls_scores,
bbox_preds,
objectnesses,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_priors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_priors * 4.
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
"""
num_imgs = len(img_metas)
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, cls_scores[0].device, with_stride=True)
flatten_cls_preds = [
cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_pred in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
for objectness in objectnesses
]
flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_objectness = torch.cat(flatten_objectness, dim=1)
flatten_priors = torch.cat(mlvl_priors)
flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
(pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets,
num_fg_imgs) = multi_apply(
self._get_target_single, flatten_cls_preds.detach(),
flatten_objectness.detach(),
flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1),
flatten_bboxes.detach(), gt_bboxes, gt_labels)
num_total_samples = max(sum(num_fg_imgs), 1)
pos_masks = torch.cat(pos_masks, 0)
cls_targets = torch.cat(cls_targets, 0)
obj_targets = torch.cat(obj_targets, 0)
bbox_targets = torch.cat(bbox_targets, 0)
if self.use_l1:
l1_targets = torch.cat(l1_targets, 0)
loss_bbox = self.loss_bbox(
flatten_bboxes.view(-1, 4)[pos_masks],
bbox_targets) / num_total_samples
loss_obj = self.loss_obj(flatten_objectness.view(-1, 1),
obj_targets) / num_total_samples
loss_cls = self.loss_cls(
flatten_cls_preds.view(-1, self.num_classes)[pos_masks],
cls_targets) / num_total_samples
loss_dict = dict(
loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj)
if self.use_l1:
loss_l1 = self.loss_l1(
flatten_bbox_preds.view(-1, 4)[pos_masks],
l1_targets) / num_total_samples
loss_dict.update(loss_l1=loss_l1)
return loss_dict
@torch.no_grad()
def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes,
gt_bboxes, gt_labels):
"""Compute classification, regression, and objectness targets for
priors in a single image.
Args:
cls_preds (Tensor): Classification predictions of one image,
a 2D-Tensor with shape [num_priors, num_classes]
objectness (Tensor): Objectness predictions of one image,
a 1D-Tensor with shape [num_priors]
priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
decoded_bboxes (Tensor): Decoded bboxes predictions of one image,
a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,
br_x, br_y] format.
gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
gt_labels (Tensor): Ground truth labels of one image, a Tensor
with shape [num_gts].
"""
num_priors = priors.size(0)
num_gts = gt_labels.size(0)
gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype)
# No target
if num_gts == 0:
cls_target = cls_preds.new_zeros((0, self.num_classes))
bbox_target = cls_preds.new_zeros((0, 4))
l1_target = cls_preds.new_zeros((0, 4))
obj_target = cls_preds.new_zeros((num_priors, 1))
foreground_mask = cls_preds.new_zeros(num_priors).bool()
return (foreground_mask, cls_target, obj_target, bbox_target,
l1_target, 0)
# YOLOX uses center priors with 0.5 offset to assign targets,
        # but uses center priors without offset to regress bboxes.
offset_priors = torch.cat(
[priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1)
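        # Editor's note: for example, a stride-8 prior stored as (16, 8, 8, 8) is shifted to its
        # cell center (20, 12, 8, 8) for assignment only; regression via _bbox_decode and
        # _get_l1_target keeps using the un-offset (16, 8) point.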
assign_result = self.assigner.assign(
cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(),
offset_priors, decoded_bboxes, gt_bboxes, gt_labels)
sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes)
pos_inds = sampling_result.pos_inds
num_pos_per_img = pos_inds.size(0)
pos_ious = assign_result.max_overlaps[pos_inds]
# IOU aware classification score
cls_target = F.one_hot(sampling_result.pos_gt_labels,
self.num_classes) * pos_ious.unsqueeze(-1)
obj_target = torch.zeros_like(objectness).unsqueeze(-1)
obj_target[pos_inds] = 1
bbox_target = sampling_result.pos_gt_bboxes
l1_target = cls_preds.new_zeros((num_pos_per_img, 4))
if self.use_l1:
l1_target = self._get_l1_target(l1_target, bbox_target,
priors[pos_inds])
foreground_mask = torch.zeros_like(objectness).to(torch.bool)
foreground_mask[pos_inds] = 1
return (foreground_mask, cls_target, obj_target, bbox_target,
l1_target, num_pos_per_img)
def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8):
"""Convert gt bboxes to center offset and log width height."""
gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes)
l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:]
l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps)
return l1_target
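    # Editor's note: this is the inverse of _bbox_decode -- the target stores the center offset in
    # units of the prior's stride and the log of the size-to-stride ratio, so the raw regression
    # outputs can be supervised directly by the L1 loss once use_l1 is switched on.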
| 1 | 25,625 | Why delete this? | open-mmlab-mmdetection | py |
@@ -10,6 +10,17 @@ export function extend(obj, props) {
return obj;
}
+/** Invoke or update a ref, depending on whether it is a function or object ref.
+ * @param {object|function} [ref=null]
+ * @param {any} [value]
+ */
+export function applyRef(ref, value) {
+ if (ref!=null) {
+ if (typeof ref=='function') ref(value);
+ else ref.current = value;
+ }
+}
+
/**
* Call a function asynchronously, as soon as possible. Makes
* use of HTML Promise to schedule the callback if available, | 1 | /**
* Copy all properties from `props` onto `obj`.
* @param {object} obj Object onto which properties should be copied.
* @param {object} props Object from which to copy properties.
* @returns {object}
* @private
*/
export function extend(obj, props) {
for (let i in props) obj[i] = props[i];
return obj;
}
/**
* Call a function asynchronously, as soon as possible. Makes
* use of HTML Promise to schedule the callback if available,
* otherwise falling back to `setTimeout` (mainly for IE<11).
* @type {(callback: function) => void}
*/
export const defer = typeof Promise=='function' ? Promise.resolve().then.bind(Promise.resolve()) : setTimeout;
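// Editor's note: a minimal usage sketch for the applyRef helper added in the patch above
// (illustrative only -- applyRef is not defined in this file, and the ref names are made up):
//   applyRef(node => { this._base = node; }, domNode);  // function ref: called with the value
//   applyRef(this.inputRef, domNode);                   // object ref: value stored on .current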
| 1 | 12,081 | This line is here to be compatible with the current way `refs` work, right? | preactjs-preact | js |
@@ -10,7 +10,11 @@
<strong><%= name %>
</td>
<td>
- <%= value %>
+ <% if value.class == BigDecimal %>
+ <%= number_to_currency(value) %>
+ <% else %>
+ <%= value %>
+ <% end %>
</td>
</tr>
<%- end %> | 1 | <table width="100%" class="data_container cart_properties table">
<tr class='header'>
<td class="first" width="33%" scope="col" colspan="2">
<h5>FY15 Credit Card Purchase Request</h5>
</td>
</tr>
<%- proposal.fields_for_display.each do |name, value| %>
<tr class="cart_item_information">
<td class="results-list" align="left" scope="row">
<strong><%= name %>
</td>
<td>
<%= value %>
</td>
</tr>
<%- end %>
</table>
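<%# Editor's note: a minimal sketch (not part of this template) of the conditional currency
    formatting introduced in the patch above; it assumes is_a? as the type check and relies on
    the standard Rails number_to_currency helper:
    value.is_a?(BigDecimal) ? number_to_currency(value) : value %>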
| 1 | 12,915 | Hmm, I wonder if we can safely assume all decimals should be displayed as $$...ok for now I suppose. | 18F-C2 | rb |
@@ -443,4 +443,17 @@ describe RSpec::Core::Example, :parent_metadata => 'sample' do
expect(ex.description).to match(/contains the example/)
end
end
+
+ describe "setting the current example" do
+ it "sets RSpec.current_example to the example that is currently running" do
+ group = RSpec::Core::ExampleGroup.describe("an example group")
+
+ current_examples = []
+ example1 = group.example("example 1") { current_examples << RSpec.current_example }
+ example2 = group.example("example 2") { current_examples << RSpec.current_example }
+
+ group.run
+ expect(current_examples).to eq([example1, example2])
+ end
+ end
end | 1 | require 'spec_helper'
require 'pp'
require 'stringio'
describe RSpec::Core::Example, :parent_metadata => 'sample' do
let(:example_group) do
RSpec::Core::ExampleGroup.describe('group description')
end
let(:example_instance) do
example_group.example('example description') { }
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
example = example_group.example('example description', *args)
example.metadata
end
end
def capture_stdout
orig_stdout = $stdout
$stdout = StringIO.new
yield
$stdout.string
ensure
$stdout = orig_stdout
end
it "can be pretty printed" do
output = ignoring_warnings { capture_stdout { pp example_instance } }
expect(output).to include("RSpec::Core::Example")
end
describe "#exception" do
it "supplies the first exception raised, if any" do
example = example_group.example { raise "first" }
example_group.after { raise "second" }
example_group.run
expect(example.exception.message).to eq("first")
end
it "returns nil if there is no exception" do
example = example_group.example('example') { }
example_group.run
expect(example.exception).to be_nil
end
it "returns false for pending_fixed? if not pending fixed" do
example = example_group.example { fail }
example_group.run
expect(example.exception).not_to be_pending_fixed
end
it "returns true for pending_fixed? if pending fixed" do
example = example_group.example do
pending("fixed") {}
end
example_group.run
expect(example.exception).to be_pending_fixed
end
end
describe "when there is an explicit description" do
context "when RSpec.configuration.format_docstrings is set to a block" do
it "formats the description using the block" do
RSpec.configuration.format_docstrings { |s| s.strip }
example = example_group.example(' an example with whitespace ') {}
example_group.run
expect(example.description).to eql('an example with whitespace')
end
end
end
describe "when there is no explicit description" do
def expect_with(*frameworks)
RSpec.configuration.stub(:expecting_with_rspec?).and_return(frameworks.include?(:rspec))
if frameworks.include?(:stdlib)
example_group.class_eval do
def assert(val)
raise "Expected #{val} to be true" unless val
end
end
end
end
context "when RSpec.configuration.format_docstrings is set to a block" do
it "formats the description using the block" do
RSpec.configuration.format_docstrings { |s| s.upcase }
example_group.example { expect(5).to eq(5) }
example_group.run
pattern = /EXAMPLE AT #{relative_path(__FILE__).upcase}:#{__LINE__ - 2}/
expect(example_group.examples.first.description).to match(pattern)
end
end
context "when `expect_with :rspec` is configured" do
before(:each) { expect_with :rspec }
it "uses the matcher-generated description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.description).to eq("should eq 5")
end
it "uses the matcher-generated description in the full description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.full_description).to eq("group description should eq 5")
end
it "uses the file and line number if there is no matcher-generated description" do
example = example_group.example {}
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
it "uses the file and line number if there is an error before the matcher" do
example = example_group.example { expect(5).to eq(5) }
example_group.before { raise }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 3}/)
end
end
context "when `expect_with :rspec, :stdlib` is configured" do
before(:each) { expect_with :rspec, :stdlib }
it "uses the matcher-generated description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.description).to eq("should eq 5")
end
it "uses the file and line number if there is no matcher-generated description" do
example = example_group.example {}
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
it "uses the file and line number if there is an error before the matcher" do
example = example_group.example { expect(5).to eq(5) }
example_group.before { raise }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 3}/)
end
end
context "when `expect_with :stdlib` is configured" do
before(:each) { expect_with :stdlib }
it "does not attempt to get the generated description from RSpec::Matchers" do
RSpec::Matchers.should_not_receive(:generated_description)
example_group.example { assert 5 == 5 }
example_group.run
end
it "uses the file and line number" do
example = example_group.example { assert 5 == 5 }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
end
end
describe "#described_class" do
it "returns the class (if any) of the outermost example group" do
expect(described_class).to eq(RSpec::Core::Example)
end
end
describe "accessing metadata within a running example" do
it "has a reference to itself when running" do |ex|
expect(ex.description).to eq("has a reference to itself when running")
end
it "can access the example group's top level metadata as if it were its own" do |ex|
expect(ex.example_group.metadata).to include(:parent_metadata => 'sample')
expect(ex.metadata).to include(:parent_metadata => 'sample')
end
end
describe "accessing options within a running example" do
it "can look up option values by key", :demo => :data do |ex|
expect(ex.metadata[:demo]).to eq(:data)
end
end
describe "#run" do
it "sets its reference to the example group instance to nil" do
group = RSpec::Core::ExampleGroup.describe do
example('example') { expect(1).to eq(1) }
end
group.run
expect(group.examples.first.instance_variable_get("@example_group_instance")).to be_nil
end
it "runs after(:each) when the example passes" do
after_run = false
group = RSpec::Core::ExampleGroup.describe do
after(:each) { after_run = true }
example('example') { expect(1).to eq(1) }
end
group.run
expect(after_run).to be_true, "expected after(:each) to be run"
end
it "runs after(:each) when the example fails" do
after_run = false
group = RSpec::Core::ExampleGroup.describe do
after(:each) { after_run = true }
example('example') { expect(1).to eq(2) }
end
group.run
expect(after_run).to be_true, "expected after(:each) to be run"
end
it "runs after(:each) when the example raises an Exception" do
after_run = false
group = RSpec::Core::ExampleGroup.describe do
after(:each) { after_run = true }
example('example') { raise "this error" }
end
group.run
expect(after_run).to be_true, "expected after(:each) to be run"
end
context "with an after(:each) that raises" do
it "runs subsequent after(:each)'s" do
after_run = false
group = RSpec::Core::ExampleGroup.describe do
after(:each) { after_run = true }
after(:each) { raise "FOO" }
example('example') { expect(1).to eq(1) }
end
group.run
expect(after_run).to be_true, "expected after(:each) to be run"
end
it "stores the exception" do
group = RSpec::Core::ExampleGroup.describe
group.after(:each) { raise "FOO" }
example = group.example('example') { expect(1).to eq(1) }
group.run
expect(example.metadata[:execution_result][:exception].message).to eq("FOO")
end
end
it "wraps before/after(:each) inside around" do
results = []
group = RSpec::Core::ExampleGroup.describe do
around(:each) do |e|
results << "around (before)"
e.run
results << "around (after)"
end
before(:each) { results << "before" }
after(:each) { results << "after" }
example { results << "example" }
end
group.run
expect(results).to eq([
"around (before)",
"before",
"example",
"after",
"around (after)"
])
end
context "clearing ivars" do
it "sets ivars to nil to prep them for GC" do
group = RSpec::Core::ExampleGroup.describe do
before(:all) { @before_all = :before_all }
before(:each) { @before_each = :before_each }
after(:each) { @after_each = :after_each }
after(:all) { @after_all = :after_all }
end
group.example("does something") do
expect(@before_all).to eq(:before_all)
expect(@before_each).to eq(:before_each)
end
expect(group.run(double.as_null_object)).to be_true
group.new do |example|
%w[@before_all @before_each @after_each @after_all].each do |ivar|
expect(example.instance_variable_get(ivar)).to be_nil
end
end
end
it "does not impact the before_all_ivars which are copied to each example" do
group = RSpec::Core::ExampleGroup.describe do
before(:all) { @before_all = "abc" }
example("first") { expect(@before_all).not_to be_nil }
example("second") { expect(@before_all).not_to be_nil }
end
expect(group.run).to be_true
end
end
context 'when the example raises an error' do
def run_and_capture_reported_message(group)
reported_msg = nil
# We can't use should_receive(:message).with(/.../) here,
# because if that fails, it would fail within our example-under-test,
# and since there's already two errors, it would just be reported again.
RSpec.configuration.reporter.stub(:message) { |msg| reported_msg = msg }
group.run
reported_msg
end
it "prints any around hook errors rather than silencing them" do
group = RSpec::Core::ExampleGroup.describe do
around(:each) { |e| e.run; raise "around" }
example("e") { raise "example" }
end
message = run_and_capture_reported_message(group)
expect(message).to match(/An error occurred in an around.* hook/i)
end
it "prints any after hook errors rather than silencing them" do
group = RSpec::Core::ExampleGroup.describe do
after(:each) { raise "after" }
example("e") { raise "example" }
end
message = run_and_capture_reported_message(group)
expect(message).to match(/An error occurred in an after.* hook/i)
end
it "does not print mock expectation errors" do
group = RSpec::Core::ExampleGroup.describe do
example do
foo = double
foo.should_receive(:bar)
raise "boom"
end
end
message = run_and_capture_reported_message(group)
expect(message).to be_nil
end
end
end
describe "#pending" do
context "in the example" do
it "sets the example to pending" do
group = RSpec::Core::ExampleGroup.describe do
example { pending }
end
group.run
expect(group.examples.first).to be_pending
end
it "allows post-example processing in around hooks (see https://github.com/rspec/rspec-core/issues/322)" do
blah = nil
group = RSpec::Core::ExampleGroup.describe do
around do |example|
example.run
blah = :success
end
example { pending }
end
group.run
expect(blah).to be(:success)
end
end
context "in before(:each)" do
it "sets each example to pending" do
group = RSpec::Core::ExampleGroup.describe do
before(:each) { pending }
example {}
example {}
end
group.run
expect(group.examples.first).to be_pending
expect(group.examples.last).to be_pending
end
end
context "in before(:all)" do
it "sets each example to pending" do
group = RSpec::Core::ExampleGroup.describe do
before(:all) { pending }
example {}
example {}
end
group.run
expect(group.examples.first).to be_pending
expect(group.examples.last).to be_pending
end
end
context "in around(:each)" do
it "sets the example to pending" do
group = RSpec::Core::ExampleGroup.describe do
around(:each) { pending }
example {}
end
group.run
expect(group.examples.first).to be_pending
end
end
end
describe "timing" do
it "uses RSpec::Core::Time as to not be affected by changes to time in examples" do
reporter = double(:reporter).as_null_object
group = RSpec::Core::ExampleGroup.describe
example = group.example
example.__send__ :start, reporter
Time.stub(:now => Time.utc(2012, 10, 1))
example.__send__ :finish, reporter
expect(example.metadata[:execution_result][:run_time]).to be < 0.2
end
end
it "does not interfere with per-example randomness when running examples in a random order" do
values = []
RSpec.configuration.order = :random
RSpec::Core::ExampleGroup.describe do
# The bug was only triggered when the examples
# were in nested contexts; see https://github.com/rspec/rspec-core/pull/837
context { example { values << rand } }
context { example { values << rand } }
end.run
expect(values.uniq).to have(2).values
end
describe "optional block argument" do
it "contains the example" do |ex|
expect(ex).to be_an(RSpec::Core::Example)
expect(ex.description).to match(/contains the example/)
end
end
end
| 1 | 9,481 | Good spec :). Very clear and easy to see what it's doing. | rspec-rspec-core | rb |
@@ -0,0 +1,12 @@
+class ExploreController < ApplicationController
+ def orgs
+ @newest_orgs = Organization.active.order('created_at DESC').limit(3)
+ @most_active_orgs = OrgThirtyDayActivity.most_active_orgs
+ @stats_by_sector = OrgStatsBySector.recent
+ @org_by_30_day_commits = OrgThirtyDayActivity.send("filter_#{params[:filter]}_orgs")
+ end
+
+ def orgs_by_thirty_day_commit_volume
+ @org_by_30_day_commits = OrgThirtyDayActivity.send("filter_#{params[:filter]}_orgs")
+ end
+end | 1 | 1 | 7,109 | Shouldn't we need to sanitize the `params[:filter]` from a defined expected values? | blackducksoftware-ohloh-ui | rb |
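The review comment on this patch asks whether `params[:filter]` should be sanitized before it is interpolated into a dynamic `send` call. The controller is Ruby/Rails; purely as a language-neutral illustration of the whitelisting pattern the reviewer is pointing at, here is a minimal Python sketch (the filter names and helper below are hypothetical, not from the PR):

# Map the untrusted query parameter onto a fixed set of allowed handlers
# and reject everything else, instead of interpolating it into a method name.
ALLOWED_FILTERS = {
    "commits": "filter_commits_orgs",  # hypothetical handler names
    "recent": "filter_recent_orgs",
}

def orgs_by_thirty_day_commit_volume(params, activity_model):
    method_name = ALLOWED_FILTERS.get(params.get("filter"))
    if method_name is None:
        raise ValueError("unsupported filter")  # or fall back to a default
    return getattr(activity_model, method_name)()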
|
@@ -10,6 +10,18 @@ from dagster.core.launcher.base import LaunchRunContext, RunLauncher
from dagster.grpc.types import ExecuteRunArgs
from dagster.serdes import ConfigurableClass, serialize_dagster_namedtuple
from dagster.utils.backcompat import experimental
+from dagster.utils.backoff import backoff
+
+
+# The ECS API is eventually consistent:
+# https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html
+# describe_tasks might initially return nothing even if a task exists.
+class EcsEventualConsistencyTimeout(Exception):
+ pass
+
+
+# 9 retries polls for up to 51.1 seconds with exponential backoff.
+BACKOFF_RETRIES = 9
@dataclass | 1 | import os
import typing
from dataclasses import dataclass
import boto3
import dagster
import requests
from dagster import Field, check
from dagster.core.launcher.base import LaunchRunContext, RunLauncher
from dagster.grpc.types import ExecuteRunArgs
from dagster.serdes import ConfigurableClass, serialize_dagster_namedtuple
from dagster.utils.backcompat import experimental
@dataclass
class TaskMetadata:
cluster: str
subnets: typing.List[str]
security_groups: typing.List[str]
task_definition: typing.Dict[str, typing.Any]
container_definition: typing.Dict[str, typing.Any]
@experimental
class EcsRunLauncher(RunLauncher, ConfigurableClass):
def __init__(self, inst_data=None, task_definition=None, container_name="run"):
self._inst_data = inst_data
self.ecs = boto3.client("ecs")
self.ec2 = boto3.resource("ec2")
self.task_definition = task_definition
self.container_name = container_name
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=task_definition)
container_names = [
container.get("name")
for container in task_definition["taskDefinition"]["containerDefinitions"]
]
check.invariant(
container_name in container_names,
f"Cannot override container '{container_name}' in task definition "
f"'{self.task_definition}' because the container is not defined.",
)
self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {
"task_definition": Field(
dagster.String,
is_required=False,
description=(
"The task definition to use when launching new tasks. "
"If none is provided, each run will create its own task "
"definition."
),
),
"container_name": Field(
dagster.String,
is_required=False,
default_value="run",
description=(
"The container name to use when launching new tasks. Defaults to 'run'."
),
),
}
@staticmethod
def from_config_value(inst_data, config_value):
return EcsRunLauncher(inst_data=inst_data, **config_value)
def _set_ecs_tags(self, run_id, task_arn):
tags = [{"key": "dagster/run_id", "value": run_id}]
self.ecs.tag_resource(resourceArn=task_arn, tags=tags)
def _set_run_tags(self, run_id, task_arn):
cluster = self._task_metadata().cluster
tags = {"ecs/task_arn": task_arn, "ecs/cluster": cluster}
self._instance.add_run_tags(run_id, tags)
def _get_run_tags(self, run_id):
run = self._instance.get_run_by_id(run_id)
tags = run.tags if run else {}
arn = tags.get("ecs/task_arn")
cluster = tags.get("ecs/cluster")
return (arn, cluster)
def launch_run(self, context: LaunchRunContext) -> None:
"""
Launch a run in an ECS task.
Currently, Fargate is the only supported launchType and awsvpc is the
only supported networkMode. These are the defaults that are set up by
docker-compose when you use the Dagster ECS reference deployment.
"""
run = context.pipeline_run
metadata = self._task_metadata()
pipeline_origin = context.pipeline_code_origin
image = pipeline_origin.repository_origin.container_image
task_definition = self._task_definition(metadata, image)["family"]
input_json = serialize_dagster_namedtuple(
ExecuteRunArgs(
pipeline_origin=pipeline_origin,
pipeline_run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
)
command = ["dagster", "api", "execute_run", input_json]
# Run a task using the same network configuration as this processes's
# task.
response = self.ecs.run_task(
taskDefinition=task_definition,
cluster=metadata.cluster,
overrides={"containerOverrides": [{"name": self.container_name, "command": command}]},
networkConfiguration={
"awsvpcConfiguration": {
"subnets": metadata.subnets,
"assignPublicIp": "ENABLED",
"securityGroups": metadata.security_groups,
}
},
launchType="FARGATE",
)
arn = response["tasks"][0]["taskArn"]
self._set_run_tags(run.run_id, task_arn=arn)
self._set_ecs_tags(run.run_id, task_arn=arn)
self._instance.report_engine_event(
message=f"Launching run in task {arn} on cluster {metadata.cluster}",
pipeline_run=run,
cls=self.__class__,
)
def can_terminate(self, run_id):
arn, cluster = self._get_run_tags(run_id)
if not (arn and cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[arn], cluster=cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status and status != "STOPPED":
return True
return False
def terminate(self, run_id):
arn, cluster = self._get_run_tags(run_id)
if not (arn and cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[arn], cluster=cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status == "STOPPED":
return False
self.ecs.stop_task(task=arn, cluster=cluster)
return True
def _task_definition(self, metadata, image):
"""
Return the launcher's default task definition if it's configured.
Otherwise, a new task definition revision is registered for every run.
First, the process that calls this method finds its own task
definition. Next, it creates a new task definition based on its own
but it overrides the image with the pipeline origin's image.
"""
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)
return task_definition["taskDefinition"]
# Start with the current process's task's definition but remove
# extra keys that aren't useful for creating a new task definition
# (status, revision, etc.)
expected_keys = [
key
for key in self.ecs.meta.service_model.shape_for(
"RegisterTaskDefinitionRequest"
).members
]
task_definition = dict(
(key, metadata.task_definition[key])
for key in expected_keys
if key in metadata.task_definition.keys()
)
# The current process might not be running in a container that has the
# pipeline's code installed. Inherit most of the process's container
# definition (things like environment, dependencies, etc.) but replace
# the image with the pipeline origin's image and give it a new name.
# Also remove entryPoint. We plan to set containerOverrides. If both
# entryPoint and containerOverrides are specified, they're concatenated
# and the command will fail
# https://aws.amazon.com/blogs/opensource/demystifying-entrypoint-cmd-docker/
container_definitions = task_definition["containerDefinitions"]
container_definitions.remove(metadata.container_definition)
container_definitions.append(
{
**metadata.container_definition,
"name": self.container_name,
"image": image,
"entryPoint": [],
}
)
task_definition = {
**task_definition,
"family": "dagster-run",
"containerDefinitions": container_definitions,
}
# Register the task overridden task definition as a revision to the
# "dagster-run" family.
# TODO: Only register the task definition if a matching one doesn't
# already exist. Otherwise, we risk exhausting the revisions limit
# (1,000,000 per family) with unnecessary revisions:
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-quotas.html
self.ecs.register_task_definition(**task_definition)
return task_definition
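    # --- Editor's sketch, not part of the original file -----------------------
    # The TODO above (avoid registering a new revision for every run) could be
    # approached by reusing the newest ACTIVE revision in the family when it
    # already matches what we are about to register. list_task_definitions and
    # describe_task_definition are real boto3 ECS APIs; the equality check is
    # only illustrative, since ECS echoes back extra defaulted fields.
    def _matching_task_definition_exists(self, task_definition):
        arns = self.ecs.list_task_definitions(
            familyPrefix=task_definition["family"],
            status="ACTIVE",
            sort="DESC",
            maxResults=1,
        ).get("taskDefinitionArns", [])
        if not arns:
            return False
        latest = self.ecs.describe_task_definition(taskDefinition=arns[0])["taskDefinition"]
        return all(latest.get(key) == value for key, value in task_definition.items())
    # ---------------------------------------------------------------------------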
def _task_metadata(self):
"""
ECS injects an environment variable into each Fargate task. The value
of this environment variable is a url that can be queried to introspect
information about the current processes's running task:
https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-metadata-endpoint-v4-fargate.html
"""
container_metadata_uri = os.environ.get("ECS_CONTAINER_METADATA_URI_V4")
name = requests.get(container_metadata_uri).json()["Name"]
task_metadata_uri = container_metadata_uri + "/task"
response = requests.get(task_metadata_uri).json()
cluster = response.get("Cluster")
task_arn = response.get("TaskARN")
task = self.ecs.describe_tasks(tasks=[task_arn], cluster=cluster)["tasks"][0]
enis = []
subnets = []
for attachment in task["attachments"]:
if attachment["type"] == "ElasticNetworkInterface":
for detail in attachment["details"]:
if detail["name"] == "subnetId":
subnets.append(detail["value"])
if detail["name"] == "networkInterfaceId":
enis.append(self.ec2.NetworkInterface(detail["value"]))
security_groups = []
for eni in enis:
for group in eni.groups:
security_groups.append(group["GroupId"])
task_definition_arn = task["taskDefinitionArn"]
task_definition = self.ecs.describe_task_definition(taskDefinition=task_definition_arn)[
"taskDefinition"
]
container_definition = next(
iter(
[
container
for container in task_definition["containerDefinitions"]
if container["name"] == name
]
)
)
return TaskMetadata(
cluster=cluster,
subnets=subnets,
security_groups=security_groups,
task_definition=task_definition,
container_definition=container_definition,
)
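The patch for this file (above) notes that the ECS API is eventually consistent, so describe_tasks can briefly return nothing for a task that run_task just started, and it caps polling at BACKOFF_RETRIES = 9. With an initial delay of 0.1 s that doubles on every attempt, 9 retries wait 0.1 * (2^9 - 1) = 51.1 s in total, which is where the "51.1 seconds" in the patch comment comes from; stretching this to the roughly five minutes raised in the review comment would take about 12 retries (0.1 * (2^12 - 1) ≈ 409.5 s). The dagster.utils.backoff helper itself is not shown in the diff, so the following is only a self-contained sketch of such a polling loop, assuming `ecs` is a boto3 ECS client:

import time

class EcsEventualConsistencyTimeout(Exception):  # mirrors the class added by the patch
    pass

def wait_for_task(ecs, task_arn, cluster, retries=9, initial_delay=0.1):
    # Poll describe_tasks until the newly started task becomes visible.
    delay = initial_delay
    for _ in range(retries):
        tasks = ecs.describe_tasks(tasks=[task_arn], cluster=cluster).get("tasks")
        if tasks:
            return tasks[0]
        time.sleep(delay)
        delay *= 2  # 0.1 + 0.2 + ... + 25.6 = 51.1 seconds over 9 retries
    raise EcsEventualConsistencyTimeout(f"{task_arn} not visible after {retries} retries")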
| 1 | 14,571 | Should we bite the bullet and poll for the full 5 minutes that AWS recommends? If we do that, we'll probably want to leave some kind of breadcrumb in the event log to let users know why it's taking so long to launch. | dagster-io-dagster | py |
@@ -36,7 +36,7 @@ import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
-import com.google.common.base.Objects;
+import com.google.common.base.MoreObjects;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.params.CommonParams; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud.api.collections;
import java.text.ParseException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
import com.google.common.base.Objects;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.RequiredSolrParams;
import org.apache.solr.util.DateMathParser;
import org.apache.solr.util.TimeZoneUtils;
import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
import static org.apache.solr.common.params.CommonParams.TZ;
/**
* Holds configuration for a routed alias, and some common code and constants.
*
* @see CreateAliasCmd
* @see MaintainRoutedAliasCmd
* @see org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor
*/
public class TimeRoutedAlias {
// These are parameter names to routed alias creation, AND are stored as metadata with the alias.
public static final String ROUTER_PREFIX = "router.";
public static final String ROUTER_TYPE_NAME = ROUTER_PREFIX + "name";
public static final String ROUTER_FIELD = ROUTER_PREFIX + "field";
public static final String ROUTER_START = ROUTER_PREFIX + "start";
public static final String ROUTER_INTERVAL = ROUTER_PREFIX + "interval";
public static final String ROUTER_MAX_FUTURE = ROUTER_PREFIX + "maxFutureMs";
public static final String ROUTER_PREEMPTIVE_CREATE_MATH = ROUTER_PREFIX + "preemptiveCreateMath";
public static final String ROUTER_AUTO_DELETE_AGE = ROUTER_PREFIX + "autoDeleteAge";
public static final String CREATE_COLLECTION_PREFIX = "create-collection.";
// plus TZ and NAME
/**
* Parameters required for creating a routed alias
*/
public static final List<String> REQUIRED_ROUTER_PARAMS = Collections.unmodifiableList(Arrays.asList(
CommonParams.NAME,
ROUTER_TYPE_NAME,
ROUTER_FIELD,
ROUTER_START,
ROUTER_INTERVAL));
/**
* Optional parameters for creating a routed alias excluding parameters for collection creation.
*/
//TODO lets find a way to remove this as it's harder to maintain than required list
public static final List<String> OPTIONAL_ROUTER_PARAMS = Collections.unmodifiableList(Arrays.asList(
ROUTER_MAX_FUTURE,
ROUTER_AUTO_DELETE_AGE,
ROUTER_PREEMPTIVE_CREATE_MATH,
TZ)); // kinda special
static Predicate<String> PARAM_IS_PROP =
key -> key.equals(TZ) ||
(key.startsWith(ROUTER_PREFIX) && !key.equals(ROUTER_START)) || //TODO reconsider START special case
key.startsWith(CREATE_COLLECTION_PREFIX);
public static final String ROUTED_ALIAS_NAME_CORE_PROP = "routedAliasName"; // core prop
// This format must be compatible with collection name limitations
private static final DateTimeFormatter DATE_TIME_FORMATTER = new DateTimeFormatterBuilder()
.append(DateTimeFormatter.ISO_LOCAL_DATE).appendPattern("[_HH[_mm[_ss]]]") //brackets mean optional
.parseDefaulting(ChronoField.HOUR_OF_DAY, 0)
.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0)
.parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0)
.toFormatter(Locale.ROOT).withZone(ZoneOffset.UTC); // deliberate -- collection names disregard TZ
public static Instant parseInstantFromCollectionName(String aliasName, String collection) {
final String dateTimePart = collection.substring(aliasName.length() + 1);
return DATE_TIME_FORMATTER.parse(dateTimePart, Instant::from);
}
public static String formatCollectionNameFromInstant(String aliasName, Instant timestamp) {
String nextCollName = DATE_TIME_FORMATTER.format(timestamp);
for (int i = 0; i < 3; i++) { // chop off seconds, minutes, hours
if (nextCollName.endsWith("_00")) {
nextCollName = nextCollName.substring(0, nextCollName.length()-3);
}
}
assert DATE_TIME_FORMATTER.parse(nextCollName, Instant::from).equals(timestamp);
return aliasName + "_" + nextCollName;
}
//
// Instance data and methods
//
private final String aliasName;
private final String routeField;
private final String intervalMath; // ex: +1DAY
private final long maxFutureMs;
private final String preemptiveCreateMath;
private final String autoDeleteAgeMath; // ex: /DAY-30DAYS *optional*
private final TimeZone timeZone;
public TimeRoutedAlias(String aliasName, Map<String, String> aliasMetadata) {
this.aliasName = aliasName;
final MapSolrParams params = new MapSolrParams(aliasMetadata); // for convenience
final RequiredSolrParams required = params.required();
if (!"time".equals(required.get(ROUTER_TYPE_NAME))) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only 'time' routed aliases is supported right now.");
}
routeField = required.get(ROUTER_FIELD);
intervalMath = required.get(ROUTER_INTERVAL);
//optional:
maxFutureMs = params.getLong(ROUTER_MAX_FUTURE, TimeUnit.MINUTES.toMillis(10));
// the date math configured is an interval to be subtracted from the most recent collection's time stamp
String pcmTmp = params.get(ROUTER_PREEMPTIVE_CREATE_MATH);
preemptiveCreateMath = pcmTmp != null ? (pcmTmp.startsWith("-") ? pcmTmp : "-" + pcmTmp) : null;
autoDeleteAgeMath = params.get(ROUTER_AUTO_DELETE_AGE); // no default
timeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
// More validation:
// check that the date math is valid
final Date now = new Date();
try {
final Date after = new DateMathParser(now, timeZone).parseMath(intervalMath);
if (!after.after(now)) {
throw new SolrException(BAD_REQUEST, "duration must add to produce a time in the future");
}
} catch (Exception e) {
throw new SolrException(BAD_REQUEST, "bad " + TimeRoutedAlias.ROUTER_INTERVAL + ", " + e, e);
}
if (autoDeleteAgeMath != null) {
try {
final Date before = new DateMathParser(now, timeZone).parseMath(autoDeleteAgeMath);
if (now.before(before)) {
throw new SolrException(BAD_REQUEST, "duration must round or subtract to produce a time in the past");
}
} catch (Exception e) {
throw new SolrException(BAD_REQUEST, "bad " + TimeRoutedAlias.ROUTER_AUTO_DELETE_AGE + ", " + e, e);
}
}
if (preemptiveCreateMath != null) {
try {
new DateMathParser().parseMath(preemptiveCreateMath);
} catch (ParseException e) {
throw new SolrException(BAD_REQUEST, "Invalid date math for preemptiveCreateMath:" + preemptiveCreateMath);
}
}
if (maxFutureMs < 0) {
throw new SolrException(BAD_REQUEST, ROUTER_MAX_FUTURE + " must be >= 0");
}
}
public String getAliasName() {
return aliasName;
}
public String getRouteField() {
return routeField;
}
public String getIntervalMath() {
return intervalMath;
}
public long getMaxFutureMs() {
return maxFutureMs;
}
public String getPreemptiveCreateWindow() {
return preemptiveCreateMath;
}
public String getAutoDeleteAgeMath() {
return autoDeleteAgeMath;
}
public TimeZone getTimeZone() {
return timeZone;
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("aliasName", aliasName)
.add("routeField", routeField)
.add("intervalMath", intervalMath)
.add("maxFutureMs", maxFutureMs)
.add("preemptiveCreateMath", preemptiveCreateMath)
.add("autoDeleteAgeMath", autoDeleteAgeMath)
.add("timeZone", timeZone)
.toString();
}
/** Parses the timestamp from the collection list and returns them in reverse sorted order (most recent 1st) */
public List<Map.Entry<Instant,String>> parseCollections(Aliases aliases, Supplier<SolrException> aliasNotExist) {
final List<String> collections = aliases.getCollectionAliasListMap().get(aliasName);
if (collections == null) {
throw aliasNotExist.get();
}
// note: I considered TreeMap but didn't like the log(N) just to grab the most recent when we use it later
List<Map.Entry<Instant,String>> result = new ArrayList<>(collections.size());
for (String collection : collections) {
Instant colStartTime = parseInstantFromCollectionName(aliasName, collection);
result.add(new AbstractMap.SimpleImmutableEntry<>(colStartTime, collection));
}
result.sort((e1, e2) -> e2.getKey().compareTo(e1.getKey())); // reverse sort by key
return result;
}
/** Computes the timestamp of the next collection given the timestamp of the one before. */
public Instant computeNextCollTimestamp(Instant fromTimestamp) {
final Instant nextCollTimestamp =
DateMathParser.parseMath(Date.from(fromTimestamp), "NOW" + intervalMath, timeZone).toInstant();
assert nextCollTimestamp.isAfter(fromTimestamp);
return nextCollTimestamp;
}
}
| 1 | 28,460 | Guava changed `Objects` -> `MoreObjects` | apache-lucene-solr | java |
@@ -20,8 +20,11 @@
package mocks
-import mock "github.com/stretchr/testify/mock"
-import persistence "github.com/temporalio/temporal/common/persistence"
+import (
+ mock "github.com/stretchr/testify/mock"
+
+ persistence "github.com/temporalio/temporal/common/persistence"
+)
// MetadataManager is an autogenerated mock type for the MetadataManager type
type MetadataManager struct { | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package mocks
import mock "github.com/stretchr/testify/mock"
import persistence "github.com/temporalio/temporal/common/persistence"
// MetadataManager is an autogenerated mock type for the MetadataManager type
type MetadataManager struct {
mock.Mock
}
// GetName provides a mock function with given fields:
func (_m *MetadataManager) GetName() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// Close provides a mock function with given fields:
func (_m *MetadataManager) Close() {
_m.Called()
}
// CreateDomain provides a mock function with given fields: request
func (_m *MetadataManager) CreateDomain(request *persistence.CreateDomainRequest) (*persistence.CreateDomainResponse, error) {
ret := _m.Called(request)
var r0 *persistence.CreateDomainResponse
if rf, ok := ret.Get(0).(func(*persistence.CreateDomainRequest) *persistence.CreateDomainResponse); ok {
r0 = rf(request)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*persistence.CreateDomainResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*persistence.CreateDomainRequest) error); ok {
r1 = rf(request)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteDomain provides a mock function with given fields: request
func (_m *MetadataManager) DeleteDomain(request *persistence.DeleteDomainRequest) error {
ret := _m.Called(request)
var r0 error
if rf, ok := ret.Get(0).(func(*persistence.DeleteDomainRequest) error); ok {
r0 = rf(request)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteDomainByName provides a mock function with given fields: request
func (_m *MetadataManager) DeleteDomainByName(request *persistence.DeleteDomainByNameRequest) error {
ret := _m.Called(request)
var r0 error
if rf, ok := ret.Get(0).(func(*persistence.DeleteDomainByNameRequest) error); ok {
r0 = rf(request)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetDomain provides a mock function with given fields: request
func (_m *MetadataManager) GetDomain(request *persistence.GetDomainRequest) (*persistence.GetDomainResponse, error) {
ret := _m.Called(request)
var r0 *persistence.GetDomainResponse
if rf, ok := ret.Get(0).(func(*persistence.GetDomainRequest) *persistence.GetDomainResponse); ok {
r0 = rf(request)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*persistence.GetDomainResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*persistence.GetDomainRequest) error); ok {
r1 = rf(request)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateDomain provides a mock function with given fields: request
func (_m *MetadataManager) UpdateDomain(request *persistence.UpdateDomainRequest) error {
ret := _m.Called(request)
var r0 error
if rf, ok := ret.Get(0).(func(*persistence.UpdateDomainRequest) error); ok {
r0 = rf(request)
} else {
r0 = ret.Error(0)
}
return r0
}
// ListDomains provides a mock function with given fields: request
func (_m *MetadataManager) ListDomains(request *persistence.ListDomainsRequest) (*persistence.ListDomainsResponse, error) {
ret := _m.Called(request)
var r0 *persistence.ListDomainsResponse
if rf, ok := ret.Get(0).(func(*persistence.ListDomainsRequest) *persistence.ListDomainsResponse); ok {
r0 = rf(request)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*persistence.ListDomainsResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*persistence.ListDomainsRequest) error); ok {
r1 = rf(request)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetMetadata provides a mock function with given fields: request
func (_m *MetadataManager) GetMetadata() (*persistence.GetMetadataResponse, error) {
ret := _m.Called()
var r0 *persistence.GetMetadataResponse
if rf, ok := ret.Get(0).(func() *persistence.GetMetadataResponse); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*persistence.GetMetadataResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
| 1 | 9,144 | All these hand generated mocks can be deleted. I think we already moved everything to gomocks. | temporalio-temporal | go |
@@ -1251,7 +1251,7 @@ Document.prototype.inspect = function (options) {
*/
Document.prototype.equals = function (doc) {
- return this.get('_id') === doc.get('_id');
+ return this.get('_id').equals(doc.get('_id'));
};
/** | 1 | /**
* Module dependencies.
*/
var EventEmitter = require('events').EventEmitter
, MongooseError = require('./error')
, MixedSchema = require('./schema/mixed')
, Schema = require('./schema')
, ValidatorError = require('./schematype').ValidatorError
, utils = require('./utils')
, clone = utils.clone
, isMongooseObject = utils.isMongooseObject
, inspect = require('util').inspect
, StateMachine = require('./statemachine')
, ActiveRoster = StateMachine.ctor('require', 'modify', 'init', 'default')
, deepEqual = utils.deepEqual
, hooks = require('hooks')
, DocumentArray
/**
* Document constructor.
*
* @param {Object} values to set
* @api private
*/
function Document (obj, fields, skipId) {
// node <0.4.3 bug
if (!this._events) this._events = {};
this.setMaxListeners(0);
if ('boolean' === typeof fields) {
this._strictMode = fields;
this._selected = fields = undefined;
} else {
this._strictMode = this.schema.options && this.schema.options.strict;
this._selected = fields;
}
this.isNew = true;
this.errors = undefined;
this._shardval = undefined;
this._saveError = undefined;
this._validationError = undefined;
this._adhocPaths = undefined;
this._removing = undefined;
this._inserting = undefined;
this.__version = undefined;
this.__getters = {};
this.__id = undefined;
this._activePaths = new ActiveRoster;
var required = this.schema.requiredPaths();
for (var i = 0; i < required.length; ++i) {
this._activePaths.require(required[i]);
}
this._doc = this.buildDoc(obj, fields, skipId);
if (obj) this.set(obj, undefined, true);
this._registerHooks();
};
/**
* Inherit from EventEmitter.
*/
Document.prototype.__proto__ = EventEmitter.prototype;
/**
* Base Mongoose instance for the model. Set by the Mongoose instance upon
* pre-compilation.
*
* @api public
*/
Document.prototype.base;
/**
* Document schema as a nested structure.
*
* @api public
*/
Document.prototype.schema;
/**
* Whether the document is new.
*
* @api public
*/
Document.prototype.isNew;
/**
* Validation errors.
*
* @api public
*/
Document.prototype.errors;
/**
* Builds the default doc structure
*
* @api private
*/
Document.prototype.buildDoc = function (obj, fields, skipId) {
var doc = {}
, self = this
, exclude
, keys
, key
, ki
// determine if this doc is a result of a query with
// excluded fields
if (fields && 'Object' === fields.constructor.name) {
keys = Object.keys(fields);
ki = keys.length;
while (ki--) {
if ('_id' !== keys[ki]) {
exclude = 0 === fields[keys[ki]];
break;
}
}
}
var paths = Object.keys(this.schema.paths)
, plen = paths.length
, ii = 0
for (; ii < plen; ++ii) {
var p = paths[ii];
if ('_id' == p) {
if (skipId) continue;
if (obj && '_id' in obj) continue;
}
var type = this.schema.paths[p]
, path = p.split('.')
, len = path.length
, last = len-1
, doc_ = doc
, i = 0
for (; i < len; ++i) {
var piece = path[i]
, def
if (i === last) {
if (fields) {
if (exclude) {
// apply defaults to all non-excluded fields
if (p in fields) continue;
def = type.getDefault(self, true);
if ('undefined' !== typeof def) {
doc_[piece] = def;
self._activePaths.default(p);
}
} else if (p in fields) {
// selected field
def = type.getDefault(self, true);
if ('undefined' !== typeof def) {
doc_[piece] = def;
self._activePaths.default(p);
}
}
} else {
def = type.getDefault(self, true);
if ('undefined' !== typeof def) {
doc_[piece] = def;
self._activePaths.default(p);
}
}
} else {
doc_ = doc_[piece] || (doc_[piece] = {});
}
}
};
return doc;
};
/**
* init
*
* Initializes the document without setters or
* marking modified. Called internally after a
* document is returned from mongodb.
*
* @param {Object} document returned by mongo
* @param {Function} callback
* @api private
*/
Document.prototype.init = function (doc, fn) {
this.isNew = false;
init(this, doc, this._doc);
this._storeShard();
this.emit('init');
if (fn) fn(null);
return this;
};
/**
* Init helper.
* @param {Object} instance
* @param {Object} obj - raw mongodb doc
* @param {Object} doc - object we are initializing
* @private
*/
function init (self, obj, doc, prefix) {
prefix = prefix || '';
var keys = Object.keys(obj)
, len = keys.length
, schema
, path
, i;
while (len--) {
i = keys[len];
path = prefix + i;
schema = self.schema.path(path);
if (!schema && obj[i] && 'Object' === obj[i].constructor.name) {
// assume nested object
doc[i] = {};
init(self, obj[i], doc[i], path + '.');
} else {
if (obj[i] === null) {
doc[i] = null;
} else if (obj[i] !== undefined) {
if (schema) {
self.try(function(){
doc[i] = schema.cast(obj[i], self, true);
});
} else {
doc[i] = obj[i];
}
}
// mark as hydrated
self._activePaths.init(path);
}
}
};
/**
* _storeShard
*
* Stores the current values of the shard keys
* for use later in the doc.save() where clause.
*
* Shard key values do not / are not allowed to change.
*
* @param {Object} document
* @private
*/
Document.prototype._storeShard = function _storeShard () {
var key = this.schema.options.shardkey;
if (!(key && 'Object' == key.constructor.name)) return;
var orig = this._shardval = {}
, paths = Object.keys(key)
, len = paths.length
, val
for (var i = 0; i < len; ++i) {
val = this.getValue(paths[i]);
if (isMongooseObject(val)) {
orig[paths[i]] = val.toObject({ depopulate: true })
} else if (val.valueOf) {
orig[paths[i]] = val.valueOf();
} else {
orig[paths[i]] = val;
}
}
}
// Set up middleware support
for (var k in hooks) {
Document.prototype[k] = Document[k] = hooks[k];
}
/**
* Updates the document.
* Mirros the model update method.
*
* Example:
*
* weirdCar.update({$inc: {wheels:1}, fn);
*
* Valid options:
*
* - safe (boolean) safe mode (defaults to value set in schema (true))
* - upsert (boolean) whether to create the doc if it doesn't match (false)
*
* @param {Object} doc
* @param {Object} options
* @param {Function} callback
* @return {Query}
* @api public
*/
Document.prototype.update = function update () {
var args = utils.args(arguments);
args.unshift({_id: this._id});
this.constructor.update.apply(this.constructor, args);
}
/**
* Sets a path, or many paths
*
* Examples:
* // path, value
* doc.set(path, value)
*
* // object
* doc.set({
* path : value
* , path2 : {
* path : value
* }
* }
*
* @param {String|Object} key path, or object
* @param {Object} value, or undefined or a prefix if first parameter is an object
* @param @optional {Schema|String|...} specify a type if this is an on-the-fly attribute
* @api public
*/
Document.prototype.set = function (path, val, type) {
var constructing = true === type
, adhoc = type && true !== type
, adhocs
if (adhoc) {
adhocs = this._adhocPaths || (this._adhocPaths = {});
adhocs[path] = Schema.interpretAsType(path, type);
}
if ('string' !== typeof path) {
// new Document({ key: val })
if (null === path || undefined === path) {
var _ = path;
path = val;
val = _;
} else {
var prefix = val
? val + '.'
: '';
if (path instanceof Document) path = path._doc;
var keys = Object.keys(path)
, i = keys.length
, pathtype
, key
while (i--) {
key = keys[i];
if (null != path[key] && 'Object' === path[key].constructor.name
&& !(this._path(prefix + key) instanceof MixedSchema)) {
this.set(path[key], prefix + key, constructing);
} else if (this._strictMode) {
pathtype = this.schema.pathType(prefix + key);
if ('real' === pathtype || 'virtual' === pathtype) {
this.set(prefix + key, path[key], constructing);
} else if ('throw' == this._strictMode) {
throw new Error("Field `" + key + "` is not in schema.");
}
} else if (undefined !== path[key]) {
this.set(prefix + key, path[key], constructing);
}
}
return this;
}
}
// ensure _strict is honored for obj props
// docschema = new Schema({ path: { nest: 'string' }})
// doc.set('path', obj);
var pathType = this.schema.pathType(path);
if ('nested' == pathType && val && 'Object' == val.constructor.name) {
this.set(val, path, constructing);
return this;
}
var schema;
if ('adhocOrUndefined' == pathType && this._strictMode) {
return this;
} else if ('virtual' == pathType) {
schema = this.schema.virtualpath(path);
schema.applySetters(val, this);
return this;
} else {
schema = this._path(path);
}
var parts = path.split('.')
, pathToMark
// When using the $set operator the path to the field must already exist.
// Else mongodb throws: "LEFT_SUBFIELD only supports Object"
if (parts.length <= 1) {
pathToMark = path;
} else {
for (var i = 0; i < parts.length; ++i) {
var part = parts[i];
var subpath = parts.slice(0, i).concat(part).join('.');
if (this.isDirectModified(subpath) // earlier prefixes that are already
// marked as dirty have precedence
|| this.get(subpath) === null) {
pathToMark = subpath;
break;
}
}
if (!pathToMark) pathToMark = path;
}
if (!schema || null === val || undefined === val) {
this._set(pathToMark, path, constructing, parts, schema, val);
return this;
}
var self = this;
// if this doc is being constructed we should not
// trigger getters.
var priorVal = constructing
? undefined
: this.get(path);
var shouldSet = this.try(function(){
var casted = schema.cast(val, self, false, priorVal);
val = schema.applySetters(casted, self);
});
if (shouldSet) {
this._set(pathToMark, path, constructing, parts, schema, val, priorVal);
}
return this;
}
Document.prototype._set = function (pathToMark, path, constructing, parts, schema, val, priorVal) {
if (this.isNew) {
this.markModified(pathToMark);
} else {
priorVal || (priorVal = this.get(path));
if (!this.isDirectModified(pathToMark)) {
if (undefined === val && !this.isSelected(path)) {
// special case:
// when a path is not selected in a query its initial
// value will be undefined.
this.markModified(pathToMark, priorVal);
} else if (!deepEqual(val, priorVal)) {
this.markModified(pathToMark, priorVal);
} else if (!constructing &&
null != val &&
path in this._activePaths.states.default &&
deepEqual(val, schema.getDefault(this, constructing))) {
// special case:
// a path with a default was $unset on the server
// and the user is setting it to the same value again
this.markModified(pathToMark, priorVal);
}
}
}
var obj = this._doc
, i = 0
, l = parts.length
for (; i < l; i++) {
var next = i + 1
, last = next === l;
if (last) {
obj[parts[i]] = val;
} else {
if (obj[parts[i]] && 'Object' === obj[parts[i]].constructor.name) {
obj = obj[parts[i]];
} else if (obj[parts[i]] && Array.isArray(obj[parts[i]])) {
obj = obj[parts[i]];
} else {
obj = obj[parts[i]] = {};
}
}
}
};
/**
* Gets a raw value from a path (no getters)
*
* @param {String} path
* @api private
*/
Document.prototype.getValue = function (path) {
var parts = path.split('.')
, obj = this._doc
, part;
for (var i = 0, l = parts.length; i < l; i++) {
part = parts[i];
obj = obj.getValue
? obj.getValue(part) // If we have an embedded array document member
: obj[part];
if (!obj) return obj;
}
return obj;
}
/**
* Sets a raw value for a path (no casting, setters, transformations)
*
* @param {String} path
* @param {Object} value
* @api private
*/
Document.prototype.setValue = function (path, val) {
var parts = path.split('.')
, obj = this._doc;
for (var i = 0, len = parts.length-1; i < len; i++) {
obj = obj[parts[i]];
}
obj[parts[len]] = val;
return this;
};
/**
* Gets a path
*
* @param {String} key path
* @param @optional {Schema|String|...} specify a type if this is an on-the-fly attribute
* @api public
*/
Document.prototype.get = function (path, type) {
var adhocs;
if (type) {
adhocs = this._adhocPaths || (this._adhocPaths = {});
adhocs[path] = Schema.interpretAsType(path, type);
}
var schema = this._path(path) || this.schema.virtualpath(path)
, pieces = path.split('.')
, obj = this._doc;
for (var i = 0, l = pieces.length; i < l; i++) {
obj = null == obj ? null : obj[pieces[i]];
}
if (schema) {
obj = schema.applyGetters(obj, this);
}
return obj;
};
/**
* Finds the path in the ad hoc type schema list or
* in the schema's list of type schemas
* @param {String} path
* @api private
*/
Document.prototype._path = function (path) {
var adhocs = this._adhocPaths
, adhocType = adhocs && adhocs[path];
if (adhocType) {
return adhocType;
} else {
return this.schema.path(path);
}
};
/**
* Commits a path, marking as modified if needed. Useful for mixed keys
*
* @api public
*/
Document.prototype.markModified = function (path) {
this._activePaths.modify(path);
};
/**
* Captures an exception that will be bubbled to `save`
*
* @param {Function} function to execute
* @param {Object} scope
*/
Document.prototype.try = function (fn, scope) {
var res;
try {
fn.call(scope);
res = true;
} catch (e) {
this._error(e);
res = false;
}
return res;
};
/**
* modifiedPaths
*
* Returns the list of paths that have been modified.
*
* If we set `documents.0.title` to 'newTitle'
* then `documents`, `documents.0`, and `documents.0.title`
* are modified.
*
* @api public
* @returns Boolean
*/
Document.prototype.modifiedPaths = function () {
var directModifiedPaths = Object.keys(this._activePaths.states.modify);
return directModifiedPaths.reduce(function (list, path) {
var parts = path.split('.');
return list.concat(parts.reduce(function (chains, part, i) {
return chains.concat(parts.slice(0, i).concat(part).join('.'));
}, []));
}, []);
};
/**
* isModified
*
* If no path is given, returns is this document was modified.
*
* If a path is given, checks if a path or any full path
* containing path as part of its path chain has been
* directly modified.
*
* e.g., if we set `documents.0.title` to 'newTitle'
* then we have directly modified `documents.0.title`
* but not directly modified `documents` or `documents.0`.
* Nonetheless, we still say `documents` and `documents.0`
* are modified. They just are not considered direct modified.
* The distinction is important because we need to distinguish
* between what has been directly modified and what hasn't so
* that we can determine the MINIMUM set of dirty data
* that we want to send to MongoDB on a Document save.
*
* @param {String} path
* @returns Boolean
* @api public
*/
Document.prototype.isModified = function (path) {
return path
? !!~this.modifiedPaths().indexOf(path)
: this._activePaths.some('modify');
// TODO remove use of some()
};
/**
* Checks if a path has been directly set and modified. False if
* the path is only part of a larger path that was directly set.
*
* e.g., if we set `documents.0.title` to 'newTitle'
* then we have directly modified `documents.0.title`
* but not directly modified `documents` or `documents.0`.
* Nonetheless, we still say `documents` and `documents.0`
* are modified. They just are not considered direct modified.
* The distinction is important because we need to distinguish
* between what has been directly modified and what hasn't so
* that we can determine the MINIMUM set of dirty data
* that we want to send to MongoDB on a Document save.
*
* @param {String} path
* @returns Boolean
* @api public
*/
Document.prototype.isDirectModified = function (path) {
return (path in this._activePaths.states.modify);
};
/**
* Checks if a certain path was initialized
*
* @param {String} path
* @returns Boolean
* @api public
*/
Document.prototype.isInit = function (path) {
return (path in this._activePaths.states.init);
};
/**
* Checks if a path was selected.
* @param {String} path
* @return Boolean
* @api public
*/
Document.prototype.isSelected = function isSelected (path) {
if (this._selected) {
if ('_id' === path) {
return 0 !== this._selected._id;
}
var paths = Object.keys(this._selected)
, i = paths.length
, inclusive = false
, cur
if (1 === i && '_id' === paths[0]) {
// only _id was selected.
return 0 === this._selected._id;
}
while (i--) {
cur = paths[i];
if ('_id' == cur) continue;
inclusive = !! this._selected[cur];
break;
}
if (path in this._selected) {
return inclusive;
}
i = paths.length;
var pathDot = path + '.';
while (i--) {
cur = paths[i];
if ('_id' == cur) continue;
if (0 === cur.indexOf(pathDot)) {
return inclusive;
}
if (0 === pathDot.indexOf(cur)) {
return inclusive;
}
}
return ! inclusive;
}
return true;
}
/**
* Validation middleware
*
* @param {Function} next
* @api public
*/
Document.prototype.validate = function (next) {
var total = 0
, self = this
, validating = {}
if (!this._activePaths.some('require', 'init', 'modify')) {
return complete();
}
function complete () {
next(self._validationError);
self._validationError = null;
}
this._activePaths.forEach('require', 'init', 'modify', function validatePath (path) {
if (validating[path]) return;
validating[path] = true;
total++;
process.nextTick(function(){
var p = self.schema.path(path);
if (!p) return --total || complete();
p.doValidate(self.getValue(path), function (err) {
if (err) self.invalidate(path, err);
--total || complete();
}, self);
});
});
return this;
};
/**
* Marks a path as invalid, causing a subsequent validation to fail.
*
* @param {String} path of the field to invalidate
* @param {String/Error} error of the path.
* @api public
*/
Document.prototype.invalidate = function (path, err) {
if (!this._validationError) {
this._validationError = new ValidationError(this);
}
if (!err || 'string' === typeof err) {
err = new ValidatorError(path, err);
}
this._validationError.errors[path] = err;
}
/**
* Resets the atomics and modified states of this document.
*
* @private
* @return {this}
*/
Document.prototype._reset = function reset () {
var self = this;
DocumentArray || (DocumentArray = require('./types/documentarray'));
this._activePaths
.map('init', 'modify', function (i) {
return self.getValue(i);
})
.filter(function (val) {
return (val && val instanceof DocumentArray && val.length);
})
.forEach(function (array) {
array.forEach(function (doc) {
doc._reset();
});
});
// clear atomics
this._dirty().forEach(function (dirt) {
var type = dirt.value;
if (type && type._atomics) {
type._atomics = {};
}
});
// Clear 'modify'('dirty') cache
this._activePaths.clear('modify');
var self = this;
this.schema.requiredPaths().forEach(function (path) {
self._activePaths.require(path);
});
return this;
}
/**
* Returns the dirty paths / vals
*
* @api private
*/
Document.prototype._dirty = function _dirty () {
var self = this;
var all = this._activePaths.map('modify', function (path) {
return { path: path
, value: self.getValue(path)
, schema: self._path(path) };
});
// Sort dirty paths in a flat hierarchy.
all.sort(function (a, b) {
return (a.path < b.path ? -1 : (a.path > b.path ? 1 : 0));
});
// Ignore "foo.a" if "foo" is dirty already.
var minimal = []
, lastPath
, top;
all.forEach(function (item, i) {
if (item.path.indexOf(lastPath) !== 0) {
lastPath = item.path + '.';
minimal.push(item);
top = item;
} else {
// special case for top level MongooseArrays
if (top.value._atomics && top.value.hasAtomics()) {
// and the item is not a MongooseArray
if (!(item.value._atomics && item.value.hasAtomics())) {
// theres a sub path of top being explicitly set.
// the only way to honor all of their modifications
// is through a $set of entire array.
// change top to a $set op
top.value._atomics = {};
top.value._atomics.$set = top.value;
}
}
}
});
top = lastPath = null;
return minimal;
}
/**
* Compiles schemas.
* @api private
*/
function compile (tree, proto, prefix) {
var keys = Object.keys(tree)
, i = keys.length
, limb
, key;
while (i--) {
key = keys[i];
limb = tree[key];
define(key
, (('Object' === limb.constructor.name
&& Object.keys(limb).length)
&& (!limb.type || limb.type.type)
? limb
: null)
, proto
, prefix
, keys);
}
};
/**
* Defines the accessor named prop on the incoming prototype.
* @api private
*/
function define (prop, subprops, prototype, prefix, keys) {
var prefix = prefix || ''
, path = (prefix ? prefix + '.' : '') + prop;
if (subprops) {
Object.defineProperty(prototype, prop, {
enumerable: true
, get: function () {
if (!this.__getters)
this.__getters = {};
if (!this.__getters[path]) {
var nested = Object.create(this);
// save scope for nested getters/setters
if (!prefix) nested._scope = this;
// shadow inherited getters from sub-objects so
// thing.nested.nested.nested... doesn't occur (gh-366)
var i = 0
, len = keys.length;
for (; i < len; ++i) {
// over-write the parents getter without triggering it
Object.defineProperty(nested, keys[i], {
enumerable: false // It doesn't show up.
, writable: true // We can set it later.
, configurable: true // We can Object.defineProperty again.
, value: undefined // It shadows its parent.
});
}
nested.toObject = function () {
return this.get(path);
};
compile(subprops, nested, path);
this.__getters[path] = nested;
}
return this.__getters[path];
}
, set: function (v) {
return this.set(v, path);
}
});
} else {
Object.defineProperty(prototype, prop, {
enumerable: true
, get: function ( ) { return this.get.call(this._scope || this, path); }
, set: function (v) { return this.set.call(this._scope || this, path, v); }
});
}
};
/**
* We override the schema setter to compile accessors
*
* @api private
*/
Document.prototype.setSchema = function (schema) {
compile(schema.tree, this);
this.schema = schema;
}
/**
* Register default hooks
*
* @api private
*/
Document.prototype._registerHooks = function _registerHooks () {
if (!this.save) return;
DocumentArray || (DocumentArray = require('./types/documentarray'));
this.pre('save', function (next) {
// we keep the error semaphore to make sure we don't
// call `save` unnecessarily (we only need 1 error)
var subdocs = 0
, error = false
, self = this;
// check for DocumentArrays
var arrays = this._activePaths
.map('init', 'modify', function (i) {
return self.getValue(i);
})
.filter(function (val) {
return (val && val instanceof DocumentArray && val.length);
});
if (!arrays.length)
return next();
arrays.forEach(function (array) {
subdocs += array.length;
array.forEach(function (value) {
if (!error)
value.save(function (err) {
if (!error) {
if (err) {
error = true;
next(err);
} else
--subdocs || next();
}
});
});
});
}, function (err) {
this.db.emit('error', err);
}).pre('save', function checkForExistingErrors (next) {
// if any doc.set() calls failed
if (this._saveError) {
next(this._saveError);
this._saveError = null;
} else {
next();
}
}).pre('save', function validation (next) {
return this.validate(next);
});
// add user defined queues
this._doQueue();
};
/**
* Registers an error
*
* @param {Error} error
* @api private
*/
Document.prototype._error = function (err) {
this._saveError = err;
return this;
};
/**
* Executes methods queued from the Schema definition
*
* @api private
*/
Document.prototype._doQueue = function () {
var q = this.schema && this.schema.callQueue;
if (q) {
for (var i = 0, l = q.length; i < l; i++) {
this[q[i][0]].apply(this, q[i][1]);
}
}
return this;
};
/**
* Gets the document
*
* Available options:
*
* - getters: apply all getters (path and virtual getters)
* - virtuals: apply virtual getters (can override `getters` option)
* - minimize: remove empty objects (defaults to true)
*
* Example of only applying path getters:
*
* doc.toObject({ getters: true, virtuals: false })
*
* Example of only applying virtual getters:
*
* doc.toObject({ virtuals: true })
*
* Example of applying both path and virtual getters:
*
* doc.toObject({ getters: true })
*
* @return {Object} plain object
* @api public
*/
Document.prototype.toObject = function (options) {
// When internally saving this document we always pass options,
// bypassing the custom schema options.
if (!(options && 'Object' == options.constructor.name)) {
options = this.schema.options.toObject
? clone(this.schema.options.toObject)
: {};
}
;('minimize' in options) || (options.minimize = true);
var ret = clone(this._doc, options);
if (options.virtuals || options.getters && false !== options.virtuals) {
applyGetters(this, ret, 'virtuals', options);
}
if (options.getters) {
applyGetters(this, ret, 'paths', options);
}
return ret;
};
/**
* Applies virtuals properties to `json`.
*
* @param {Document} self
* @param {Object} json
* @param {String} either `virtuals` or `paths`
* @return json
* @private
*/
function applyGetters (self, json, type, options) {
var schema = self.schema
, paths = Object.keys(schema[type])
, i = paths.length
, path
while (i--) {
path = paths[i];
var parts = path.split('.')
, plen = parts.length
, last = plen - 1
, branch = json
, part
for (var ii = 0; ii < plen; ++ii) {
part = parts[ii];
if (ii === last) {
branch[part] = clone(self.get(path), options);
} else {
branch = branch[part] || (branch[part] = {});
}
}
}
return json;
}
/**
* JSON.stringify helper.
*
* Implicitly called when a document is passed
* to JSON.stringify()
*
* @return {Object}
* @api public
*/
Document.prototype.toJSON = function (options) {
// check for object type since an array of documents
// being stringified passes array indexes instead
// of options objects. JSON.stringify([doc, doc])
if (!(options && 'Object' == options.constructor.name)) {
options = this.schema.options.toJSON
? clone(this.schema.options.toJSON)
: {};
}
options.json = true;
return this.toObject(options);
};
/**
* Helper for console.log
*
* @api public
*/
Document.prototype.toString =
Document.prototype.inspect = function (options) {
var opts = options && 'Object' == options.constructor.name
? options
: undefined
return inspect(this.toObject(opts));
};
/**
* Returns true if the Document stores the same data as doc.
* @param {Document} doc to compare to
* @return {Boolean}
* @api public
*/
Document.prototype.equals = function (doc) {
return this.get('_id') === doc.get('_id');
};
/**
* Module exports.
*/
module.exports = Document;
/**
* Document Validation Error
*/
function ValidationError (instance) {
MongooseError.call(this, "Validation failed");
Error.captureStackTrace(this, arguments.callee);
this.name = 'ValidationError';
this.errors = instance.errors = {};
};
ValidationError.prototype.toString = function () {
return this.name + ': ' + Object.keys(this.errors).map(function (key) {
return String(this.errors[key]);
}, this).join(', ');
};
/**
* Inherits from MongooseError.
*/
ValidationError.prototype.__proto__ = MongooseError.prototype;
Document.ValidationError = ValidationError;
/**
* Document Error
*
* @param text
*/
function DocumentError () {
MongooseError.call(this, msg);
Error.captureStackTrace(this, arguments.callee);
this.name = 'DocumentError';
};
/**
* Inherits from MongooseError.
*/
DocumentError.prototype.__proto__ = MongooseError.prototype;
exports.Error = DocumentError;
| 1 | 11,576 | what about string/number etc _ids? this will fail. | Automattic-mongoose | js |
@@ -682,7 +682,7 @@ func TestRemoteMachineSetReconcile(t *testing.T) {
t.Errorf("machineset %v has unexpected labels:\nexpected: %v\nactual: %v", eMS.Name, eMS.Labels, rMS.Labels)
}
if !reflect.DeepEqual(eMS.ObjectMeta.Annotations, rMS.ObjectMeta.Annotations) {
- t.Errorf("machineset %v has unexpected annotations:\nexpected: %v\nactual: %v", eMS.Name, eMS.Labels, rMS.Labels)
+ t.Errorf("machineset %v has unexpected annotations:\nexpected: %v\nactual: %v", eMS.Name, eMS.Annotations, rMS.Annotations)
}
if !reflect.DeepEqual(eMS.Spec.Template.Spec.Labels, rMS.Spec.Template.Spec.Labels) {
t.Errorf("machineset %v machinespec has unexpected labels:\nexpected: %v\nactual: %v", eMS.Name, eMS.Spec.Template.Spec.Labels, rMS.Spec.Template.Spec.Labels) | 1 | package remotemachineset
import (
"context"
"encoding/json"
"fmt"
"reflect"
"testing"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/utils/pointer"
awsproviderapis "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
autoscalingv1 "github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1"
autoscalingv1beta1 "github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1beta1"
"github.com/openshift/hive/pkg/apis"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hivev1aws "github.com/openshift/hive/pkg/apis/hive/v1/aws"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/controller/remotemachineset/mock"
"github.com/openshift/hive/pkg/remoteclient"
remoteclientmock "github.com/openshift/hive/pkg/remoteclient/mock"
)
const (
testName = "foo"
testNamespace = "default"
testClusterID = "foo-12345-uuid"
testInfraID = "foo-12345"
machineAPINamespace = "openshift-machine-api"
testAMI = "ami-totallyfake"
testRegion = "test-region"
testPoolName = "worker"
testInstanceType = "test-instance-type"
)
func init() {
log.SetLevel(log.DebugLevel)
}
func TestRemoteMachineSetReconcile(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
machineapi.SchemeBuilder.AddToScheme(scheme.Scheme)
awsproviderapis.AddToScheme(scheme.Scheme)
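	// Utility function to get a test MachinePool from the fake (local) client; returns nil when it is not found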
getPool := func(c client.Client, poolName string) *hivev1.MachinePool {
pool := &hivev1.MachinePool{}
if err := c.Get(context.TODO(), client.ObjectKey{Namespace: testNamespace, Name: fmt.Sprintf("%s-%s", testName, poolName)}, pool); err != nil {
return nil
}
return pool
}
// Utility function to list test MachineSets from the fake client
getRMSL := func(rc client.Client) (*machineapi.MachineSetList, error) {
rMSL := &machineapi.MachineSetList{}
tm := metav1.TypeMeta{}
tm.SetGroupVersionKind(machineapi.SchemeGroupVersion.WithKind("MachineSet"))
err := rc.List(context.TODO(), rMSL, &client.ListOptions{Raw: &metav1.ListOptions{TypeMeta: tm}})
if err == nil {
return rMSL, err
}
return nil, err
}
// Utility function to list test MachineAutoscalers from the fake client
getRMAL := func(rc client.Client) (*autoscalingv1beta1.MachineAutoscalerList, error) {
rMAL := &autoscalingv1beta1.MachineAutoscalerList{}
tm := metav1.TypeMeta{}
tm.SetGroupVersionKind(autoscalingv1beta1.SchemeGroupVersion.WithKind("MachineAutoscaler"))
err := rc.List(context.TODO(), rMAL, &client.ListOptions{Raw: &metav1.ListOptions{TypeMeta: tm}})
if err == nil {
return rMAL, err
}
return nil, err
}
// Utility function to list test ClusterAutoscalers from the fake client
getRCAL := func(rc client.Client) (*autoscalingv1.ClusterAutoscalerList, error) {
rCAL := &autoscalingv1.ClusterAutoscalerList{}
tm := metav1.TypeMeta{}
tm.SetGroupVersionKind(autoscalingv1.SchemeGroupVersion.WithKind("ClusterAutoscaler"))
err := rc.List(context.TODO(), rCAL, &client.ListOptions{Raw: &metav1.ListOptions{TypeMeta: tm}})
if err == nil {
return rCAL, err
}
return nil, err
}
tests := []struct {
name string
clusterDeployment *hivev1.ClusterDeployment
machinePool *hivev1.MachinePool
remoteExisting []runtime.Object
generatedMachineSets []*machineapi.MachineSet
actuatorDoNotProceed bool
expectErr bool
expectNoFinalizer bool
expectedRemoteMachineSets []*machineapi.MachineSet
expectedRemoteMachineAutoscalers []autoscalingv1beta1.MachineAutoscaler
expectedRemoteClusterAutoscalers []autoscalingv1.ClusterAutoscaler
}{
{
name: "Cluster not installed yet",
clusterDeployment: func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = false
return cd
}(),
machinePool: testMachinePool(),
},
{
name: "No-op",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
},
{
name: "No-op when actuator says not to proceed",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
actuatorDoNotProceed: true,
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
},
{
name: "Update machine set replicas",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 0, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 1),
},
},
{
name: "Create missing machine set",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
},
{
name: "Skip create missing machine set when clusterDeployment has annotation hive.openshift.io/syncset-pause: true ",
clusterDeployment: func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Annotations = map[string]string{}
cd.Annotations[constants.SyncsetPauseAnnotation] = "true"
return cd
}(),
machinePool: testMachinePool(),
},
{
name: "Skip create missing machine set when cluster is unreachable",
clusterDeployment: func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.UnreachableCondition)
cond.Status = corev1.ConditionTrue
return cd
}(),
machinePool: testMachinePool(),
},
{
name: "Delete extra machine set",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1d", "worker", true, 1, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
},
{
name: "Other machinesets ignored",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 3, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 3, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 3, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 3, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 3, 0),
},
},
{
name: "Create additional machinepool machinesets",
clusterDeployment: testClusterDeployment(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-other-us-east-1a", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1c", "other", true, 1, 0),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
testMachineSet("foo-12345-other-us-east-1a", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1c", "other", true, 1, 0),
},
},
{
name: "Delete machinepool machinesets",
clusterDeployment: testClusterDeployment(),
machinePool: func() *hivev1.MachinePool {
mp := testMachinePool()
now := metav1.Now()
mp.DeletionTimestamp = &now
return mp
}(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-other-us-east-1a", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1c", "other", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectNoFinalizer: true,
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-other-us-east-1a", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1b", "other", true, 1, 0),
testMachineSet("foo-12345-other-us-east-1c", "other", true, 1, 0),
},
},
{
name: "No cluster deployment",
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectNoFinalizer: true,
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
},
{
name: "Deleted cluster deployment",
clusterDeployment: func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
machinePool: testMachinePool(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectNoFinalizer: true,
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
},
{
name: "No-op with auto-scaling",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testClusterAutoscaler("3"),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("3"),
},
},
{
name: "Create cluster autoscaler",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("1"),
},
},
{
name: "Update cluster autoscaler when missing scale down",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
func() runtime.Object {
a := testClusterAutoscaler("1")
a.Spec.ScaleDown = nil
return a
}(),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("2"),
},
},
{
name: "Update cluster autoscaler when scale down disabled",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
func() runtime.Object {
a := testClusterAutoscaler("1")
a.Spec.ScaleDown.Enabled = false
return a
}(),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("2"),
},
},
{
name: "Create machine autoscalers",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testClusterAutoscaler("1"),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("1"),
},
},
{
name: "Update machine autoscalers",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testClusterAutoscaler("1"),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 1),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 2, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "2", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "2", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("1"),
},
},
{
name: "Delete machine autoscalers",
clusterDeployment: testClusterDeployment(),
machinePool: testAutoscalingMachinePool(3, 5),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testClusterAutoscaler("1"),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
testMachineAutoscaler("foo-12345-worker-us-east-1d", "1", 1, 1),
},
generatedMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", false, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", false, 1, 0),
},
expectedRemoteMachineSets: []*machineapi.MachineSet{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
},
expectedRemoteMachineAutoscalers: []autoscalingv1beta1.MachineAutoscaler{
*testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
*testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("1"),
},
},
{
name: "Delete remote resources for deleted auto-scaling machinepool",
clusterDeployment: testClusterDeployment(),
machinePool: func() *hivev1.MachinePool {
mp := testAutoscalingMachinePool(3, 5)
now := metav1.Now()
mp.DeletionTimestamp = &now
return mp
}(),
remoteExisting: []runtime.Object{
testMachineSet("foo-12345-worker-us-east-1a", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1b", "worker", true, 1, 0),
testMachineSet("foo-12345-worker-us-east-1c", "worker", true, 1, 0),
testClusterAutoscaler("1"),
testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1b", "1", 1, 2),
testMachineAutoscaler("foo-12345-worker-us-east-1c", "1", 1, 1),
},
expectNoFinalizer: true,
expectedRemoteClusterAutoscalers: []autoscalingv1.ClusterAutoscaler{
*testClusterAutoscaler("1"),
},
},
}
for _, test := range tests {
apis.AddToScheme(scheme.Scheme)
machineapi.SchemeBuilder.AddToScheme(scheme.Scheme)
autoscalingv1.SchemeBuilder.AddToScheme(scheme.Scheme)
autoscalingv1beta1.SchemeBuilder.AddToScheme(scheme.Scheme)
t.Run(test.name, func(t *testing.T) {
localExisting := []runtime.Object{}
if test.clusterDeployment != nil {
localExisting = append(localExisting, test.clusterDeployment)
}
if test.machinePool != nil {
localExisting = append(localExisting, test.machinePool)
}
fakeClient := fake.NewFakeClient(localExisting...)
remoteFakeClient := fake.NewFakeClient(test.remoteExisting...)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockActuator := mock.NewMockActuator(mockCtrl)
if test.generatedMachineSets != nil {
mockActuator.EXPECT().
GenerateMachineSets(test.clusterDeployment, test.machinePool, gomock.Any()).
Return(test.generatedMachineSets, !test.actuatorDoNotProceed, nil)
}
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
mockRemoteClientBuilder.EXPECT().Build().Return(remoteFakeClient, nil).AnyTimes()
logger := log.WithField("controller", "remotemachineset")
controllerExpectations := controllerutils.NewExpectations(logger)
rcd := &ReconcileRemoteMachineSet{
Client: fakeClient,
scheme: scheme.Scheme,
logger: logger,
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
actuatorBuilder: func(cd *hivev1.ClusterDeployment, pool *hivev1.MachinePool, remoteMachineSets []machineapi.MachineSet, cdLog log.FieldLogger) (Actuator, error) {
return mockActuator, nil
},
expectations: controllerExpectations,
}
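			// Reconcile the worker machine pool of the test cluster against the fake local and remote clients.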
_, err := rcd.Reconcile(reconcile.Request{
NamespacedName: types.NamespacedName{
Name: fmt.Sprintf("%s-worker", testName),
Namespace: testNamespace,
},
})
if test.expectErr {
if err == nil {
t.Errorf("expected error but got none")
}
// Should not proceed with test validations if we expected an error from the reconcile.
return
}
if err != nil && !test.expectErr {
t.Errorf("unexpected error: %v", err)
return
}
if pool := getPool(fakeClient, "worker"); assert.NotNil(t, pool, "missing machinepool") {
if test.expectNoFinalizer {
assert.NotContains(t, pool.Finalizers, finalizer, "unexpected finalizer")
} else {
assert.Contains(t, pool.Finalizers, finalizer, "missing finalizer")
}
}
rMSL, err := getRMSL(remoteFakeClient)
if assert.NoError(t, err) {
for _, eMS := range test.expectedRemoteMachineSets {
found := false
for _, rMS := range rMSL.Items {
if eMS.Name == rMS.Name {
found = true
assert.Equal(t, *eMS.Spec.Replicas, *rMS.Spec.Replicas)
assert.Equal(t, eMS.Generation, rMS.Generation)
if !reflect.DeepEqual(eMS.ObjectMeta.Labels, rMS.ObjectMeta.Labels) {
t.Errorf("machineset %v has unexpected labels:\nexpected: %v\nactual: %v", eMS.Name, eMS.Labels, rMS.Labels)
}
if !reflect.DeepEqual(eMS.ObjectMeta.Annotations, rMS.ObjectMeta.Annotations) {
t.Errorf("machineset %v has unexpected annotations:\nexpected: %v\nactual: %v", eMS.Name, eMS.Labels, rMS.Labels)
}
if !reflect.DeepEqual(eMS.Spec.Template.Spec.Labels, rMS.Spec.Template.Spec.Labels) {
t.Errorf("machineset %v machinespec has unexpected labels:\nexpected: %v\nactual: %v", eMS.Name, eMS.Spec.Template.Spec.Labels, rMS.Spec.Template.Spec.Labels)
}
if !reflect.DeepEqual(eMS.Spec.Template.Spec.Taints, rMS.Spec.Template.Spec.Taints) {
t.Errorf("machineset %v has unexpected taints:\nexpected: %v\nactual: %v", eMS.Name, eMS.Spec.Template.Spec.Taints, rMS.Spec.Template.Spec.Taints)
}
rAWSProviderSpec, _ := decodeAWSMachineProviderSpec(
rMS.Spec.Template.Spec.ProviderSpec.Value, scheme.Scheme)
log.Debugf("remote AWS: %v", printAWSMachineProviderConfig(rAWSProviderSpec))
assert.NotNil(t, rAWSProviderSpec)
eAWSProviderSpec, _ := decodeAWSMachineProviderSpec(
eMS.Spec.Template.Spec.ProviderSpec.Value, scheme.Scheme)
log.Debugf("expected AWS: %v", printAWSMachineProviderConfig(eAWSProviderSpec))
assert.NotNil(t, eAWSProviderSpec)
assert.Equal(t, eAWSProviderSpec.AMI, rAWSProviderSpec.AMI, "%s AMI does not match", eMS.Name)
}
}
if !found {
t.Errorf("did not find expected remote machineset: %v", eMS.Name)
}
}
for _, rMS := range rMSL.Items {
found := false
for _, eMS := range test.expectedRemoteMachineSets {
if rMS.Name == eMS.Name {
found = true
}
}
if !found {
t.Errorf("found unexpected remote machineset: %v", rMS.Name)
}
}
}
if rMAL, err := getRMAL(remoteFakeClient); assert.NoError(t, err, "error getting machine autoscalers") {
assert.ElementsMatch(t, test.expectedRemoteMachineAutoscalers, rMAL.Items, "unexpected remote machine autoscalers")
}
if rCAL, err := getRCAL(remoteFakeClient); assert.NoError(t, err, "error getting cluster autoscalers") {
assert.ElementsMatch(t, test.expectedRemoteClusterAutoscalers, rCAL.Items, "unexpected remote cluster autoscalers")
}
})
}
}
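// testMachinePool returns a worker MachinePool fixture with 3 replicas on AWS,
// referencing the test ClusterDeployment and carrying the controller finalizer.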
func testMachinePool() *hivev1.MachinePool {
return &hivev1.MachinePool{
TypeMeta: metav1.TypeMeta{
APIVersion: hivev1.SchemeGroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: fmt.Sprintf("%s-%s", testName, testPoolName),
Finalizers: []string{finalizer},
},
Spec: hivev1.MachinePoolSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: testName,
},
Name: testPoolName,
Replicas: pointer.Int64Ptr(3),
Platform: hivev1.MachinePoolPlatform{
AWS: &hivev1aws.MachinePoolPlatform{
InstanceType: testInstanceType,
},
},
Labels: map[string]string{
"machine.openshift.io/cluster-api-cluster": testInfraID,
"machine.openshift.io/cluster-api-machine-role": testPoolName,
"machine.openshift.io/cluster-api-machine-type": testPoolName,
},
Taints: []corev1.Taint{
{
Key: "foo",
Value: "bar",
Effect: corev1.TaintEffectNoSchedule,
},
},
},
}
}
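// testAutoscalingMachinePool returns the worker MachinePool fixture with the fixed
// replica count removed and autoscaling configured between min and max replicas.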
func testAutoscalingMachinePool(min, max int) *hivev1.MachinePool {
p := testMachinePool()
p.Spec.Replicas = nil
p.Spec.Autoscaling = &hivev1.MachinePoolAutoscaling{
MinReplicas: int32(min),
MaxReplicas: int32(max),
}
return p
}
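// testMachineSet returns a remote MachineSet fixture for the given pool (machineType),
// replica count, and generation; when unstompedAnnotation is true it carries a
// pre-existing annotation that reconciliation is expected to preserve.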
func testMachineSet(name string, machineType string, unstompedAnnotation bool, replicas int, generation int) *machineapi.MachineSet {
msReplicas := int32(replicas)
awsProviderSpec := &awsprovider.AWSMachineProviderConfig{
TypeMeta: metav1.TypeMeta{
Kind: "AWSMachineProviderConfig",
APIVersion: awsprovider.SchemeGroupVersion.String(),
},
AMI: awsprovider.AWSResourceReference{
ID: aws.String(testAMI),
},
}
rawAWSProviderSpec, err := encodeAWSMachineProviderSpec(awsProviderSpec, scheme.Scheme)
if err != nil {
log.WithError(err).Fatal("error encoding AWS machine provider spec")
}
ms := machineapi.MachineSet{
TypeMeta: metav1.TypeMeta{
APIVersion: machineapi.SchemeGroupVersion.String(),
Kind: "MachineSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: machineAPINamespace,
Labels: map[string]string{
machinePoolNameLabel: machineType,
"machine.openshift.io/cluster-api-cluster": testInfraID,
},
Generation: int64(generation),
Annotations: map[string]string{
constants.HiveManagedAnnotation: "true",
},
},
Spec: machineapi.MachineSetSpec{
Replicas: &msReplicas,
Template: machineapi.MachineTemplateSpec{
Spec: machineapi.MachineSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"machine.openshift.io/cluster-api-cluster": testInfraID,
"machine.openshift.io/cluster-api-machine-role": machineType,
"machine.openshift.io/cluster-api-machine-type": machineType,
},
},
ProviderSpec: machineapi.ProviderSpec{
Value: rawAWSProviderSpec,
},
Taints: []corev1.Taint{
{
Key: "foo",
Value: "bar",
Effect: corev1.TaintEffectNoSchedule,
},
},
},
},
},
}
// Add a pre-existing annotation which we will ensure remains in updated machinesets.
if unstompedAnnotation {
ms.Annotations["hive.openshift.io/unstomped"] = "true"
}
return &ms
}
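// testMachineAutoscaler returns a MachineAutoscaler fixture in openshift-machine-api
// targeting the MachineSet of the same name, with the given resourceVersion and
// min/max replica bounds.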
func testMachineAutoscaler(name string, resourceVersion string, min, max int) *autoscalingv1beta1.MachineAutoscaler {
return &autoscalingv1beta1.MachineAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Namespace: "openshift-machine-api",
Name: name,
ResourceVersion: resourceVersion,
Labels: map[string]string{
machinePoolNameLabel: "worker",
},
},
Spec: autoscalingv1beta1.MachineAutoscalerSpec{
MinReplicas: int32(min),
MaxReplicas: int32(max),
ScaleTargetRef: autoscalingv1beta1.CrossVersionObjectReference{
APIVersion: machineapi.SchemeGroupVersion.String(),
Kind: "MachineSet",
Name: name,
},
},
}
}
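// testClusterAutoscaler returns the "default" ClusterAutoscaler with scale-down
// enabled and the given resourceVersion.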
func testClusterAutoscaler(resourceVersion string) *autoscalingv1.ClusterAutoscaler {
return &autoscalingv1.ClusterAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
ResourceVersion: resourceVersion,
},
Spec: autoscalingv1.ClusterAutoscalerSpec{
ScaleDown: &autoscalingv1.ScaleDownConfig{
Enabled: true,
},
},
}
}
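// testClusterDeployment returns an installed AWS ClusterDeployment fixture
// (reachable by default) that owns the test machine pools.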
func testClusterDeployment() *hivev1.ClusterDeployment {
return &hivev1.ClusterDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: hivev1.SchemeGroupVersion.String(),
Kind: "ClusterDeployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Finalizers: []string{hivev1.FinalizerDeprovision},
UID: types.UID("1234"),
},
Spec: hivev1.ClusterDeploymentSpec{
ClusterName: testName,
Platform: hivev1.Platform{
AWS: &hivev1aws.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "aws-credentials",
},
Region: testRegion,
},
},
ClusterMetadata: &hivev1.ClusterMetadata{
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: fmt.Sprintf("%s-admin-kubeconfig", testName)},
ClusterID: testClusterID,
InfraID: testInfraID,
},
Installed: true,
},
Status: hivev1.ClusterDeploymentStatus{
Conditions: []hivev1.ClusterDeploymentCondition{{
Type: hivev1.UnreachableCondition,
Status: corev1.ConditionFalse,
}},
},
}
}
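// printAWSMachineProviderConfig renders the AWS machine provider config as JSON for debug logging.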
func printAWSMachineProviderConfig(cfg *awsprovider.AWSMachineProviderConfig) string {
b, err := json.Marshal(cfg)
if err != nil {
panic(err.Error())
}
return string(b)
}
| 1 | 12,987 | Looks like the test had a bug prior to this. | openshift-hive | go |